Compare commits

4 Commits

Author  SHA1        Message                                                        Date
bunnei  a587270489  Merge cda72cde54 into c7f2fb2151                               2018-08-23 06:25:14 +00:00
bunnei  cda72cde54  gl_rasterizer_cache: Remove assert for RecreateSurface type.  2018-08-21 01:07:44 -04:00
bunnei  537376dbd4  gl_rasterizer_cache: Implement compressed texture copies.     2018-08-21 01:07:44 -04:00
bunnei  bce9aafd10  gl_rasterizer_cache: Use CPU address for surface cache key.   2018-08-21 01:07:43 -04:00
8 changed files with 102 additions and 123 deletions
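Net effect of the three non-merge commits: GPU-to-CPU address translation happens once, when a surface's parameters are built, and the rasterizer cache is keyed by CPU virtual address from then on. A minimal sketch of the keying change (the type aliases and Surface stub below are simplified stand-ins, not the yuzu definitions):

#include <cstdint>
#include <memory>
#include <unordered_map>

using VAddr = std::uint64_t;    // CPU virtual address
using GPUVAddr = std::uint64_t; // GPU virtual address

struct Surface {};

// Before: the cache was keyed by GPU address, so every flush/invalidate had to
// translate through the GPU memory manager (and cope with failed translations).
// std::unordered_map<GPUVAddr, std::shared_ptr<Surface>> surface_cache;

// After: the key is the CPU address, computed once at surface creation.
std::unordered_map<VAddr, std::shared_ptr<Surface>> surface_cache;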

View File

@@ -10,6 +10,7 @@
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"
#include "video_core/memory_manager.h"
namespace Service::Nvidia::Devices {

View File

@@ -251,8 +251,8 @@ std::string ReadCString(VAddr vaddr, std::size_t max_length) {
     return string;
 }
 
-void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached) {
-    if (gpu_addr == 0) {
+void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+    if (vaddr == 0) {
         return;
     }
@@ -261,19 +261,8 @@ void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached)
     // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
     // assumes the specified GPU address region is contiguous as well.
 
-    u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1;
-    for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) {
-        boost::optional<VAddr> maybe_vaddr =
-            Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(gpu_addr);
-        // The GPU <-> CPU virtual memory mapping is not 1:1
-        if (!maybe_vaddr) {
-            LOG_ERROR(HW_Memory,
-                      "Trying to flush a cached region to an invalid physical address {:016X}",
-                      gpu_addr);
-            continue;
-        }
-        VAddr vaddr = *maybe_vaddr;
+    u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
+    for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
         PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
 
         if (cached) {
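The num_pages expression above counts every CPU page the region touches, including partially covered first and last pages. A standalone check of that arithmetic, assuming yuzu's 4 KiB CPU page size (PAGE_BITS == 12, as in core/memory.h):

#include <cassert>
#include <cstdint>

int main() {
    constexpr std::uint64_t PAGE_BITS = 12; // 4 KiB pages

    // 768 bytes starting near the end of page 0 straddles pages 0 and 1, so
    // the count must be 2 even though the size is well under one page.
    const std::uint64_t vaddr = 0xF00;
    const std::uint64_t size = 0x300;
    const std::uint64_t num_pages =
        ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
    assert(num_pages == 2);

    // A page-aligned, page-sized region touches exactly one page.
    assert((((0x1000 + 0x1000 - 1) >> PAGE_BITS) - (0x1000 >> PAGE_BITS) + 1) == 1);
    return 0;
}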
@@ -344,29 +333,19 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
         const VAddr overlap_start = std::max(start, region_start);
         const VAddr overlap_end = std::min(end, region_end);
-
-        const std::vector<Tegra::GPUVAddr> gpu_addresses =
-            system_instance.GPU().memory_manager->CpuToGpuAddress(overlap_start);
-        if (gpu_addresses.empty()) {
-            return;
-        }
-
         const u64 overlap_size = overlap_end - overlap_start;
-        for (const auto& gpu_address : gpu_addresses) {
-            auto& rasterizer = system_instance.Renderer().Rasterizer();
-            switch (mode) {
-            case FlushMode::Flush:
-                rasterizer.FlushRegion(gpu_address, overlap_size);
-                break;
-            case FlushMode::Invalidate:
-                rasterizer.InvalidateRegion(gpu_address, overlap_size);
-                break;
-            case FlushMode::FlushAndInvalidate:
-                rasterizer.FlushAndInvalidateRegion(gpu_address, overlap_size);
-                break;
-            }
-        }
+
+        auto& rasterizer = system_instance.Renderer().Rasterizer();
+        switch (mode) {
+        case FlushMode::Flush:
+            rasterizer.FlushRegion(overlap_start, overlap_size);
+            break;
+        case FlushMode::Invalidate:
+            rasterizer.InvalidateRegion(overlap_start, overlap_size);
+            break;
+        case FlushMode::FlushAndInvalidate:
+            rasterizer.FlushAndInvalidateRegion(overlap_start, overlap_size);
+            break;
+        }
     };
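With the one-to-many CPU-to-GPU translation gone, the overlap range is handed to the rasterizer directly. For context, a sketch of a hypothetical call site (the function and FlushMode come from the diff above; Write8 and its surroundings are illustrative only):

#include <cstdint>

using VAddr = std::uint64_t;
using u8 = std::uint8_t;

namespace Memory {
enum class FlushMode { Flush, Invalidate, FlushAndInvalidate };
void RasterizerFlushVirtualRegion(VAddr start, std::uint64_t size, FlushMode mode);
} // namespace Memory

// Hypothetical caller: before the CPU overwrites guest memory, any cached GPU
// copy of that region must be invalidated so the rasterizer re-reads it.
void Write8(VAddr vaddr, u8 value) {
    Memory::RasterizerFlushVirtualRegion(vaddr, sizeof(u8), Memory::FlushMode::Invalidate);
    // ... perform the actual guest-memory write ...
}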

View File

@@ -11,7 +11,6 @@
 #include <boost/icl/interval_map.hpp>
 #include "common/common_types.h"
 #include "core/memory_hook.h"
-#include "video_core/memory_manager.h"
 
 namespace Kernel {
 class Process;
@@ -179,7 +178,7 @@ enum class FlushMode {
 /**
  * Mark each page touching the region as cached.
  */
-void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached);
+void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached);
 
 /**
  * Flushes and invalidates any externally cached rasterizer resources touching the given virtual

View File

@@ -27,14 +27,14 @@ public:
     virtual void FlushAll() = 0;
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    virtual void FlushRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void FlushRegion(VAddr addr, u64 size) = 0;
 
     /// Notify rasterizer that any caches of the specified region should be invalidated
-    virtual void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
     /// and invalidated
-    virtual void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
+    virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
 
     /// Attempt to use a faster method to perform a display transfer with is_texture_copy = 0
     virtual bool AccelerateDisplayTransfer(const void* config) {
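Every cache-management entry point in the interface now takes a CPU virtual address, so a backend only overrides the three VAddr overloads. A minimal no-op implementer, sketched against a stripped-down copy of the interface (not the full yuzu class):

#include <cstdint>

using VAddr = std::uint64_t;
using u64 = std::uint64_t;

// Stripped-down copy of the interface above, for illustration only.
class RasterizerInterface {
public:
    virtual ~RasterizerInterface() = default;
    virtual void FlushRegion(VAddr addr, u64 size) = 0;
    virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
    virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
};

// A null backend satisfies the contract trivially; handy as a test stub.
class NullRasterizer final : public RasterizerInterface {
public:
    void FlushRegion(VAddr, u64) override {}
    void InvalidateRegion(VAddr, u64) override {}
    void FlushAndInvalidateRegion(VAddr, u64) override {}
};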

View File

@@ -545,17 +545,17 @@ void RasterizerOpenGL::FlushAll() {
     res_cache.FlushRegion(0, Kernel::VMManager::MAX_ADDRESS);
 }
 
-void RasterizerOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     res_cache.FlushRegion(addr, size);
 }
 
-void RasterizerOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     res_cache.InvalidateRegion(addr, size);
 }
 
-void RasterizerOpenGL::FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
+void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     res_cache.FlushRegion(addr, size);
     res_cache.InvalidateRegion(addr, size);

View File

@@ -39,9 +39,9 @@ public:
     void Clear() override;
     void NotifyMaxwellRegisterChanged(u32 method) override;
     void FlushAll() override;
-    void FlushRegion(Tegra::GPUVAddr addr, u64 size) override;
-    void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
-    void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
+    void FlushRegion(VAddr addr, u64 size) override;
+    void InvalidateRegion(VAddr addr, u64 size) override;
+    void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
     bool AccelerateDisplayTransfer(const void* config) override;
     bool AccelerateTextureCopy(const void* config) override;
     bool AccelerateFill(const void* config) override;

View File

@@ -35,9 +35,9 @@ struct FormatTuple {
 /*static*/ SurfaceParams SurfaceParams::CreateForTexture(
     const Tegra::Texture::FullTextureInfo& config) {
+    const auto& gpu = Core::System::GetInstance().GPU();
     SurfaceParams params{};
-    params.addr = config.tic.Address();
+    params.cpu_addr = *gpu.memory_manager->GpuToCpuAddress(config.tic.Address());
     params.is_tiled = config.tic.IsTiled();
     params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
     params.pixel_format =
@@ -55,9 +55,9 @@ struct FormatTuple {
 /*static*/ SurfaceParams SurfaceParams::CreateForFramebuffer(
     const Tegra::Engines::Maxwell3D::Regs::RenderTargetConfig& config) {
+    const auto& gpu = Core::System::GetInstance().GPU();
     SurfaceParams params{};
-    params.addr = config.Address();
+    params.cpu_addr = *gpu.memory_manager->GpuToCpuAddress(config.Address());
     params.is_tiled = true;
     params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
     params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
@@ -75,9 +75,9 @@ struct FormatTuple {
 /*static*/ SurfaceParams SurfaceParams::CreateForDepthBuffer(u32 zeta_width, u32 zeta_height,
                                                              Tegra::GPUVAddr zeta_address,
                                                              Tegra::DepthFormat format) {
+    const auto& gpu = Core::System::GetInstance().GPU();
     SurfaceParams params{};
-    params.addr = zeta_address;
+    params.cpu_addr = *gpu.memory_manager->GpuToCpuAddress(zeta_address);
     params.is_tiled = true;
     params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
     params.pixel_format = PixelFormatFromDepthFormat(format);
@@ -167,11 +167,6 @@ static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType
     return format;
 }
 
-VAddr SurfaceParams::GetCpuAddr() const {
-    const auto& gpu = Core::System::GetInstance().GPU();
-    return *gpu.memory_manager->GpuToCpuAddress(addr);
-}
-
 static bool IsPixelFormatASTC(PixelFormat format) {
     switch (format) {
     case PixelFormat::ASTC_2D_4X4:
@@ -216,33 +211,28 @@ static bool IsFormatBCn(PixelFormat format) {
 }
 
 template <bool morton_to_gl, PixelFormat format>
-void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_buffer,
-                Tegra::GPUVAddr addr) {
+void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_buffer, VAddr addr) {
     constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
     constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format);
-    const auto& gpu = Core::System::GetInstance().GPU();
 
     if (morton_to_gl) {
         // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual
         // pixel values.
         const u32 tile_size{IsFormatBCn(format) ? 4U : 1U};
-        const std::vector<u8> data =
-            Tegra::Texture::UnswizzleTexture(*gpu.memory_manager->GpuToCpuAddress(addr), tile_size,
-                                             bytes_per_pixel, stride, height, block_height);
+        const std::vector<u8> data = Tegra::Texture::UnswizzleTexture(
+            addr, tile_size, bytes_per_pixel, stride, height, block_height);
         const size_t size_to_copy{std::min(gl_buffer.size(), data.size())};
         gl_buffer.assign(data.begin(), data.begin() + size_to_copy);
     } else {
         // TODO(bunnei): Assumes the default rendering GOB size of 16 (128 lines). We should
         // check the configuration for this and perform more generic un/swizzle
         LOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!");
-        VideoCore::MortonCopyPixels128(
-            stride, height, bytes_per_pixel, gl_bytes_per_pixel,
-            Memory::GetPointer(*gpu.memory_manager->GpuToCpuAddress(addr)), gl_buffer.data(),
-            morton_to_gl);
+        VideoCore::MortonCopyPixels128(stride, height, bytes_per_pixel, gl_bytes_per_pixel,
+                                       Memory::GetPointer(addr), gl_buffer.data(), morton_to_gl);
     }
 }
 
-static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPUVAddr),
+static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, VAddr),
                             SurfaceParams::MaxPixelFormat>
     morton_to_gl_fns = {
         // clang-format off
@@ -297,7 +287,7 @@ static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPU
         // clang-format on
 };
 
-static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, Tegra::GPUVAddr),
+static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, VAddr),
                             SurfaceParams::MaxPixelFormat>
     gl_to_morton_fns = {
         // clang-format off
@@ -532,7 +522,7 @@ MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 64
 void CachedSurface::LoadGLBuffer() {
     ASSERT(params.type != SurfaceType::Fill);
 
-    const u8* const texture_src_data = Memory::GetPointer(params.GetCpuAddr());
+    const u8* const texture_src_data = Memory::GetPointer(params.cpu_addr);
 
     ASSERT(texture_src_data);
@@ -545,7 +535,7 @@ void CachedSurface::LoadGLBuffer() {
         gl_buffer.resize(copy_size);
 
         morton_to_gl_fns[static_cast<size_t>(params.pixel_format)](
-            params.width, params.block_height, params.height, gl_buffer, params.addr);
+            params.width, params.block_height, params.height, gl_buffer, params.cpu_addr);
     } else {
         const u8* const texture_src_data_end = texture_src_data + copy_size;
@@ -557,7 +547,7 @@ void CachedSurface::LoadGLBuffer() {
 MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
 
 void CachedSurface::FlushGLBuffer() {
-    u8* const dst_buffer = Memory::GetPointer(params.GetCpuAddr());
+    u8* const dst_buffer = Memory::GetPointer(params.cpu_addr);
 
     ASSERT(dst_buffer);
     ASSERT(gl_buffer.size() ==
@@ -572,7 +562,7 @@ void CachedSurface::FlushGLBuffer() {
         std::memcpy(dst_buffer, gl_buffer.data(), params.size_in_bytes);
     } else {
         gl_to_morton_fns[static_cast<size_t>(params.pixel_format)](
-            params.width, params.block_height, params.height, gl_buffer, params.cpu_addr);
+            params.width, params.block_height, params.height, gl_buffer, params.cpu_addr);
     }
 }
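LoadGLBuffer and FlushGLBuffer select a MortonCopy instantiation through the per-format function-pointer arrays declared earlier, so a runtime pixel format picks a fully specialized copy routine. A reduced sketch of that dispatch pattern, using two hypothetical formats:

#include <array>
#include <cstdint>
#include <vector>

using VAddr = std::uint64_t;
using u32 = std::uint32_t;
using u8 = std::uint8_t;

enum class PixelFormat : std::size_t { ABGR8 = 0, B5G6R5 = 1, Max = 2 };

template <bool morton_to_gl, PixelFormat format>
void MortonCopy(u32 stride, u32 block_height, u32 height, std::vector<u8>& gl_buffer, VAddr addr) {
    // The real code un/swizzles here; this sketch only shows the dispatch shape.
}

// One entry per pixel format: indexing with the runtime format value selects
// the matching compile-time specialization.
static constexpr std::array<void (*)(u32, u32, u32, std::vector<u8>&, VAddr),
                            static_cast<std::size_t>(PixelFormat::Max)>
    morton_to_gl_fns = {
        MortonCopy<true, PixelFormat::ABGR8>,
        MortonCopy<true, PixelFormat::B5G6R5>,
};

void Load(PixelFormat fmt, std::vector<u8>& buf, VAddr addr) {
    morton_to_gl_fns[static_cast<std::size_t>(fmt)](64, 16, 64, buf, addr);
}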
@@ -756,17 +746,12 @@ void RasterizerCacheOpenGL::FlushSurface(const Surface& surface) {
 }
 
 Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) {
-    if (params.addr == 0 || params.height * params.width == 0) {
+    if (params.cpu_addr == 0 || params.height * params.width == 0) {
         return {};
     }
 
-    const auto& gpu = Core::System::GetInstance().GPU();
-    // Don't try to create any entries in the cache if the address of the texture is invalid.
-    if (gpu.memory_manager->GpuToCpuAddress(params.addr) == boost::none)
-        return {};
-
     // Look up surface in the cache based on address
-    const auto& search{surface_cache.find(params.addr)};
+    const auto& search{surface_cache.find(params.cpu_addr)};
     Surface surface;
     if (search != surface_cache.end()) {
         surface = search->second;
@@ -799,9 +784,6 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
                                                 const SurfaceParams& new_params) {
     // Verify surface is compatible for blitting
     const auto& params{surface->GetSurfaceParams()};
-    ASSERT(params.type == new_params.type);
-    ASSERT_MSG(params.GetCompressionFactor(params.pixel_format) == 1,
-               "Compressed texture reinterpretation is not supported");
 
     // Create a new surface with the new parameters, and blit the previous surface to it
     Surface new_surface{std::make_shared<CachedSurface>(new_params)};
@@ -818,9 +800,12 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
     glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo.handle);
     glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW_ARB);
-    glGetTextureImage(surface->Texture().handle, 0, source_format.format, source_format.type,
-                      params.SizeInBytes(), nullptr);
+    if (source_format.compressed) {
+        glGetCompressedTextureImage(surface->Texture().handle, 0, params.SizeInBytes(), nullptr);
+    } else {
+        glGetTextureImage(surface->Texture().handle, 0, source_format.format, source_format.type,
+                          params.SizeInBytes(), nullptr);
+    }
 
     // If the new texture is bigger than the previous one, we need to fill in the rest with data
     // from the CPU.
     if (params.SizeInBytes() < new_params.SizeInBytes()) {
@@ -834,10 +819,8 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
"reinterpretation but the texture is tiled.");
}
size_t remaining_size = new_params.SizeInBytes() - params.SizeInBytes();
auto address = Core::System::GetInstance().GPU().memory_manager->GpuToCpuAddress(
new_params.addr + params.SizeInBytes());
std::vector<u8> data(remaining_size);
Memory::ReadBlock(*address, data.data(), data.size());
Memory::ReadBlock(new_params.cpu_addr + params.SizeInBytes(), data.data(), data.size());
glBufferSubData(GL_PIXEL_PACK_BUFFER, params.SizeInBytes(), remaining_size, data.data());
}
@@ -846,9 +829,32 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& surface,
     const auto& dest_rect{new_params.GetRect()};
 
     glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo.handle);
-    glTextureSubImage2D(
-        new_surface->Texture().handle, 0, 0, 0, static_cast<GLsizei>(dest_rect.GetWidth()),
-        static_cast<GLsizei>(dest_rect.GetHeight()), dest_format.format, dest_format.type, nullptr);
+    if (dest_format.compressed) {
+        OpenGLState cur_state = OpenGLState::GetCurState();
+        GLuint old_tex = cur_state.texture_units[0].texture_2d;
+        cur_state.texture_units[0].texture_2d = new_surface->Texture().handle;
+        cur_state.Apply();
+
+        // Ensure no bad interactions with GL_UNPACK_ALIGNMENT
+        ASSERT(new_params.width * CachedSurface::GetGLBytesPerPixel(new_params.pixel_format) % 4 ==
+               0);
+        glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.width));
+        glActiveTexture(GL_TEXTURE0);
+        glCompressedTexImage2D(GL_TEXTURE_2D, 0, dest_format.internal_format,
+                               static_cast<GLsizei>(dest_rect.GetWidth()),
+                               static_cast<GLsizei>(dest_rect.GetHeight()), 0,
+                               static_cast<GLsizei>(new_params.SizeInBytes()), nullptr);
+        glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+
+        cur_state.texture_units[0].texture_2d = old_tex;
+        cur_state.Apply();
+    } else {
+        glTextureSubImage2D(new_surface->Texture().handle, 0, 0, 0,
+                            static_cast<GLsizei>(dest_rect.GetWidth()),
+                            static_cast<GLsizei>(dest_rect.GetHeight()), dest_format.format,
+                            dest_format.type, nullptr);
+    }
 
     glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
     pbo.Release();
@@ -870,9 +876,8 @@ Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr cpu_addr) const {
     std::vector<Surface> surfaces;
     for (const auto& surface : surface_cache) {
         const auto& params = surface.second->GetSurfaceParams();
-        const VAddr surface_cpu_addr = params.GetCpuAddr();
-        if (cpu_addr >= surface_cpu_addr && cpu_addr < (surface_cpu_addr + params.size_in_bytes)) {
-            ASSERT_MSG(cpu_addr == surface_cpu_addr, "overlapping surfaces are unsupported");
+        if (cpu_addr >= params.cpu_addr && cpu_addr < (params.cpu_addr + params.size_in_bytes)) {
+            ASSERT_MSG(cpu_addr == params.cpu_addr, "overlapping surfaces are unsupported");
             surfaces.push_back(surface.second);
         }
     }
@@ -886,13 +891,13 @@ Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr cpu_addr) const {
     return surfaces[0];
 }
 
-void RasterizerCacheOpenGL::FlushRegion(Tegra::GPUVAddr /*addr*/, size_t /*size*/) {
+void RasterizerCacheOpenGL::FlushRegion(VAddr /*addr*/, size_t /*size*/) {
     // TODO(bunnei): This is unused in the current implementation of the rasterizer cache. We should
     // probably implement this in the future, but for now, the `use_accurate_framebufers` setting
     // can be used to always flush.
 }
 
-void RasterizerCacheOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, size_t size) {
+void RasterizerCacheOpenGL::InvalidateRegion(VAddr addr, size_t size) {
     for (auto iter = surface_cache.cbegin(); iter != surface_cache.cend();) {
         const auto& surface{iter->second};
         const auto& params{surface->GetSurfaceParams()};
@@ -907,27 +912,27 @@ void RasterizerCacheOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, size_t size)
 void RasterizerCacheOpenGL::RegisterSurface(const Surface& surface) {
     const auto& params{surface->GetSurfaceParams()};
-    const auto& search{surface_cache.find(params.addr)};
+    const auto& search{surface_cache.find(params.cpu_addr)};
 
     if (search != surface_cache.end()) {
         // Registered already
         return;
     }
 
-    surface_cache[params.addr] = surface;
-    UpdatePagesCachedCount(params.addr, params.size_in_bytes, 1);
+    surface_cache[params.cpu_addr] = surface;
+    UpdatePagesCachedCount(params.cpu_addr, params.size_in_bytes, 1);
 }
 
 void RasterizerCacheOpenGL::UnregisterSurface(const Surface& surface) {
     const auto& params{surface->GetSurfaceParams()};
-    const auto& search{surface_cache.find(params.addr)};
+    const auto& search{surface_cache.find(params.cpu_addr)};
 
     if (search == surface_cache.end()) {
         // Unregistered already
         return;
     }
 
-    UpdatePagesCachedCount(params.addr, params.size_in_bytes, -1);
+    UpdatePagesCachedCount(params.cpu_addr, params.size_in_bytes, -1);
     surface_cache.erase(search);
 }
@@ -936,10 +941,10 @@ constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
     return boost::make_iterator_range(map.equal_range(interval));
 }
 
-void RasterizerCacheOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {
-    const u64 num_pages = ((addr + size - 1) >> Tegra::MemoryManager::PAGE_BITS) -
-                          (addr >> Tegra::MemoryManager::PAGE_BITS) + 1;
-    const u64 page_start = addr >> Tegra::MemoryManager::PAGE_BITS;
+void RasterizerCacheOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
+    const u64 num_pages =
+        ((addr + size - 1) >> Memory::PAGE_BITS) - (addr >> Memory::PAGE_BITS) + 1;
+    const u64 page_start = addr >> Memory::PAGE_BITS;
     const u64 page_end = page_start + num_pages;
 
     // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
@@ -952,10 +957,8 @@ void RasterizerCacheOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 siz
         const auto interval = pair.first & pages_interval;
         const int count = pair.second;
-        const Tegra::GPUVAddr interval_start_addr = boost::icl::first(interval)
-                                                    << Tegra::MemoryManager::PAGE_BITS;
-        const Tegra::GPUVAddr interval_end_addr = boost::icl::last_next(interval)
-                                                  << Tegra::MemoryManager::PAGE_BITS;
+        const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
+        const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
         const u64 interval_size = interval_end_addr - interval_start_addr;
 
         if (delta > 0 && count == delta)
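The cached_pages bookkeeping relies on a boost::icl::interval_map property: segments whose aggregated count reaches zero are erased outright, which is why a negative delta has to be applied to the map before the loop walks the affected range. A minimal standalone sketch of that behavior:

#include <boost/icl/interval_map.hpp>
#include <cassert>
#include <cstdint>

using u64 = std::uint64_t;

int main() {
    boost::icl::interval_map<u64, int> cached_pages;
    const auto pages = boost::icl::interval<u64>::right_open(0, 4); // pages [0, 4)

    cached_pages += std::make_pair(pages, 1); // register one surface
    cached_pages += std::make_pair(pages, 1); // register an overlapping surface
    assert(cached_pages.begin()->second == 2);

    // Decrementing to zero erases the segment entirely; afterwards there is no
    // record left to inspect, so the unregister path must apply the negative
    // delta up front and then walk the already-updated map.
    cached_pages += std::make_pair(pages, -2);
    assert(cached_pages.empty());
    return 0;
}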

View File

@@ -628,12 +628,9 @@ struct SurfaceParams {
                GetFormatBpp(pixel_format) / CHAR_BIT;
     }
 
-    /// Returns the CPU virtual address for this surface
-    VAddr GetCpuAddr() const;
-
     /// Returns true if the specified region overlaps with this surface's region in Switch memory
-    bool IsOverlappingRegion(Tegra::GPUVAddr region_addr, size_t region_size) const {
-        return addr <= (region_addr + region_size) && region_addr <= (addr + size_in_bytes);
+    bool IsOverlappingRegion(VAddr region_addr, size_t region_size) const {
+        return cpu_addr <= (region_addr + region_size) && region_addr <= (cpu_addr + size_in_bytes);
     }
 
     /// Creates SurfaceParams from a texture configuration
@@ -649,9 +646,9 @@ struct SurfaceParams {
                                              Tegra::DepthFormat format);
 
     bool operator==(const SurfaceParams& other) const {
-        return std::tie(addr, is_tiled, block_height, pixel_format, component_type, type, width,
+        return std::tie(cpu_addr, is_tiled, block_height, pixel_format, component_type, type, width,
                         height, unaligned_height, size_in_bytes) ==
-               std::tie(other.addr, other.is_tiled, other.block_height, other.pixel_format,
+               std::tie(other.cpu_addr, other.is_tiled, other.block_height, other.pixel_format,
                         other.component_type, other.type, other.width, other.height,
                         other.unaligned_height, other.size_in_bytes);
     }
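operator== compares every identifying field at once via std::tie, so swapping addr for cpu_addr is a one-token change on each side. The same idiom in isolation, with a reduced hypothetical struct:

#include <cstdint>
#include <tuple>

struct Params {
    std::uint64_t cpu_addr;
    bool is_tiled;
    std::uint32_t width;
    std::uint32_t height;

    // std::tie builds a tuple of references, so one tuple comparison covers
    // all listed members without a hand-written chain of &&.
    bool operator==(const Params& other) const {
        return std::tie(cpu_addr, is_tiled, width, height) ==
               std::tie(other.cpu_addr, other.is_tiled, other.width, other.height);
    }
};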
@@ -666,7 +663,7 @@ struct SurfaceParams {
                std::tie(other.pixel_format, other.type, other.cache_width, other.cache_height);
     }
 
-    Tegra::GPUVAddr addr;
+    VAddr cpu_addr;
     bool is_tiled;
     u32 block_height;
     PixelFormat pixel_format;
@@ -734,10 +731,10 @@ public:
     Surface TryFindFramebufferSurface(VAddr cpu_addr) const;
 
     /// Write any cached resources overlapping the region back to memory (if dirty)
-    void FlushRegion(Tegra::GPUVAddr addr, size_t size);
+    void FlushRegion(VAddr addr, size_t size);
 
     /// Mark the specified region as being invalidated
-    void InvalidateRegion(Tegra::GPUVAddr addr, size_t size);
+    void InvalidateRegion(VAddr addr, size_t size);
 
 private:
     void LoadSurface(const Surface& surface);
@@ -753,9 +750,9 @@ private:
     void UnregisterSurface(const Surface& surface);
 
     /// Increase/decrease the number of surface in pages touching the specified region
-    void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta);
+    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta);
 
-    std::unordered_map<Tegra::GPUVAddr, Surface> surface_cache;
+    std::unordered_map<VAddr, Surface> surface_cache;
     PageMap cached_pages;
 
     OGLFramebuffer read_framebuffer;