Compare commits
1 commit
master...mainline-0

| Author | SHA1 | Date |
|---|---|---|
|  | 6f3982ab39 |  |
nvhost_nvdec.cpp

```diff
@@ -68,6 +68,7 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) {}
 void nvhost_nvdec::OnClose(DeviceFD fd) {
     LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
     system.GPU().ClearCdmaInstance();
+    system.GPU().MemoryManager().InvalidateQueuedCaches();
 }
 
 } // namespace Service::Nvidia::Devices
```
nvhost_nvdec_common.cpp

```diff
@@ -193,7 +193,13 @@ NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vec
             return NvResult::InvalidState;
         }
         if (const auto size{RemoveBufferMap(object->dma_map_addr)}; size) {
-            gpu.MemoryManager().Unmap(object->dma_map_addr, *size);
+            if (vic_device) {
+                // UnmapVicFrame defers texture_cache invalidation of the frame address until
+                // the stream is over
+                gpu.MemoryManager().UnmapVicFrame(object->dma_map_addr, *size);
+            } else {
+                gpu.MemoryManager().Unmap(object->dma_map_addr, *size);
+            }
         } else {
             // This occurs quite frequently, however does not seem to impact functionality
             LOG_DEBUG(Service_NVDRV, "invalid offset=0x{:X} dma=0x{:X}", object->addr,
```
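For context: `RemoveBufferMap` is not part of this diff. Judging from the init-statement `if (const auto size{RemoveBufferMap(...)}; size)` and the `*size` dereference above, it presumably returns the size of the removed mapping as an optional; a hedged sketch of the assumed shape:

```cpp
// Assumption, not shown in this diff: inferred from the
// `if (const auto size{...}; size)` pattern and the `*size` dereference
// above. Returns the size of the removed mapping, or std::nullopt if
// gpu_addr was not being tracked.
std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
```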
nvhost_nvdec_common.h

```diff
@@ -160,6 +160,7 @@ protected:
 
     s32_le nvmap_fd{};
     u32_le submit_timeout{};
+    bool vic_device{};
    std::shared_ptr<nvmap> nvmap_dev;
     SyncpointManager& syncpoint_manager;
     std::array<u32, MaxSyncPoints> device_syncpoints{};
```
nvhost_vic.cpp

```diff
@@ -12,8 +12,9 @@
 namespace Service::Nvidia::Devices {
 nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
                        SyncpointManager& syncpoint_manager_)
-    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
-
+    : nvhost_nvdec_common(system_, std::move(nvmap_dev_), syncpoint_manager_) {
+    vic_device = true;
+}
 nvhost_vic::~nvhost_vic() = default;
 
 NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
```
memory_manager.cpp

```diff
@@ -118,6 +118,25 @@ void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) {
                .IsSuccess());
 }
 
+void MemoryManager::UnmapVicFrame(GPUVAddr gpu_addr, std::size_t size) {
+    if (!size) {
+        return;
+    }
+
+    const std::optional<VAddr> cpu_addr = GpuToCpuAddress(gpu_addr);
+    ASSERT(cpu_addr);
+    rasterizer->InvalidateExceptTextureCache(*cpu_addr, size);
+    cache_invalidate_queue.push_back({*cpu_addr, size});
+
+    UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
+}
+
+void MemoryManager::InvalidateQueuedCaches() {
+    for (const auto& entry : cache_invalidate_queue) {
+        rasterizer->InvalidateTextureCache(entry.first, entry.second);
+    }
+    cache_invalidate_queue.clear();
+}
 PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const {
     return page_table[PageEntryIndex(gpu_addr)];
 }
```
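The declaration of `cache_invalidate_queue` itself falls outside the hunks shown here; the header hunk below adds only the two function declarations. From `push_back({*cpu_addr, size})` and the `entry.first` / `entry.second` accesses above, it is presumably a vector of address/size pairs, roughly:

```cpp
// Assumption, not shown in this diff: the queue's declaration among the
// MemoryManager members in memory_manager.h, inferred from
// push_back({*cpu_addr, size}) and entry.first/entry.second above.
std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue;
```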
memory_manager.h

```diff
@@ -143,6 +143,14 @@ public:
     [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align);
     void Unmap(GPUVAddr gpu_addr, std::size_t size);
 
+    /**
+     * Some decoded NVDEC frames require that the texture cache is not invalidated.
+     * UnmapVicFrame defers the texture cache invalidation until the stream ends
+     * by invoking InvalidateQueuedCaches to invalidate all frame texture caches.
+     */
+    void UnmapVicFrame(GPUVAddr gpu_addr, std::size_t size);
+    void InvalidateQueuedCaches();
+
 private:
     [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
     void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
```
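Putting the pieces together: `UnmapVicFrame` queues one invalidation per decoded frame while the stream runs, and `nvhost_nvdec::OnClose` drains the queue via `InvalidateQueuedCaches` once the stream ends. A minimal self-contained sketch of that defer-then-drain pattern (stand-in types and names, not yuzu's own):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

using VAddr = std::uint64_t; // stand-in for yuzu's VAddr

// Stand-in for RasterizerInterface::InvalidateTextureCache.
void InvalidateTextureCache(VAddr addr, std::size_t size) {
    std::cout << "invalidate texture cache: addr=0x" << std::hex << addr
              << " size=0x" << size << std::dec << '\n';
}

class DeferredTextureInvalidator {
public:
    // Mirrors UnmapVicFrame: record the frame's region instead of
    // invalidating the texture cache immediately.
    void Defer(VAddr addr, std::size_t size) {
        queue.emplace_back(addr, size);
    }

    // Mirrors InvalidateQueuedCaches: invalidate every queued region once
    // the stream is over (called from nvhost_nvdec::OnClose in the diff).
    void Drain() {
        for (const auto& [addr, size] : queue) {
            InvalidateTextureCache(addr, size);
        }
        queue.clear();
    }

private:
    std::vector<std::pair<VAddr, std::size_t>> queue;
};

int main() {
    DeferredTextureInvalidator invalidator;
    invalidator.Defer(0x10000, 0x4000); // frame 1 unmapped mid-stream
    invalidator.Defer(0x20000, 0x4000); // frame 2 unmapped mid-stream
    invalidator.Drain();                // stream closed: invalidate both
}
```

Per the header comment above, the deferral exists because some decoded frames must remain valid in the texture cache after their GPU mapping is removed; the queued invalidations run only once the stream ends.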
rasterizer_interface.h

```diff
@@ -77,6 +77,12 @@ public:
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
     virtual void FlushRegion(VAddr addr, u64 size) = 0;
 
+    /// Notify rasterizer to invalidate all caches of the specified region except the texture cache
+    virtual void InvalidateExceptTextureCache(VAddr addr, u64 size) = 0;
+
+    /// Notify rasterizer to invalidate the texture cache
+    virtual void InvalidateTextureCache(VAddr addr, u64 size) = 0;
+
     /// Check if the specified memory area requires flushing to CPU Memory.
     virtual bool MustFlushRegion(VAddr addr, u64 size) = 0;
```
gl_rasterizer.cpp

```diff
@@ -322,6 +322,26 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
     query_cache.FlushRegion(addr, size);
 }
 
+void RasterizerOpenGL::InvalidateExceptTextureCache(VAddr addr, u64 size) {
+    if (addr == 0 || size == 0) {
+        return;
+    }
+    shader_cache.InvalidateRegion(addr, size);
+    {
+        std::scoped_lock lock{buffer_cache.mutex};
+        buffer_cache.WriteMemory(addr, size);
+    }
+    query_cache.InvalidateRegion(addr, size);
+}
+
+void RasterizerOpenGL::InvalidateTextureCache(VAddr addr, u64 size) {
+    if (addr == 0 || size == 0) {
+        return;
+    }
+    std::scoped_lock lock{texture_cache.mutex};
+    texture_cache.UnmapMemory(addr, size);
+}
+
 bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size) {
     std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
     if (!Settings::IsGPULevelHigh()) {
```
gl_rasterizer.h

```diff
@@ -86,6 +86,8 @@ public:
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;
     void FlushRegion(VAddr addr, u64 size) override;
+    void InvalidateExceptTextureCache(VAddr addr, u64 size) override;
+    void InvalidateTextureCache(VAddr addr, u64 size) override;
     bool MustFlushRegion(VAddr addr, u64 size) override;
     void InvalidateRegion(VAddr addr, u64 size) override;
     void OnCPUWrite(VAddr addr, u64 size) override;
```
vk_rasterizer.cpp

```diff
@@ -311,6 +311,26 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size) {
     query_cache.FlushRegion(addr, size);
 }
 
+void RasterizerVulkan::InvalidateExceptTextureCache(VAddr addr, u64 size) {
+    if (addr == 0 || size == 0) {
+        return;
+    }
+    pipeline_cache.InvalidateRegion(addr, size);
+    {
+        std::scoped_lock lock{buffer_cache.mutex};
+        buffer_cache.WriteMemory(addr, size);
+    }
+    query_cache.InvalidateRegion(addr, size);
+}
+
+void RasterizerVulkan::InvalidateTextureCache(VAddr addr, u64 size) {
+    if (addr == 0 || size == 0) {
+        return;
+    }
+    std::scoped_lock lock{texture_cache.mutex};
+    texture_cache.UnmapMemory(addr, size);
+}
+
 bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size) {
     std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
     if (!Settings::IsGPULevelHigh()) {
```

Note: the definitions above drop the redundant `Vulkan::` qualifier; these functions are defined inside `namespace Vulkan`, matching the surrounding `RasterizerVulkan::` definitions.
vk_rasterizer.h

```diff
@@ -79,6 +79,8 @@ public:
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;
     void FlushRegion(VAddr addr, u64 size) override;
+    void InvalidateExceptTextureCache(VAddr addr, u64 size) override;
+    void InvalidateTextureCache(VAddr addr, u64 size) override;
     bool MustFlushRegion(VAddr addr, u64 size) override;
     void InvalidateRegion(VAddr addr, u64 size) override;
     void OnCPUWrite(VAddr addr, u64 size) override;
```