Compare commits

...

4 Commits

Author   SHA1        Message                   Date
yuzubot  93636449e7  "Merge Tagged PR 6598"    2022-01-30 13:02:18 +00:00
yuzubot  da2d8feb25  "Merge Tagged PR 7346"    2022-01-30 13:02:17 +00:00
yuzubot  6a1f0458b6  "Merge Tagged PR 7497"    2022-01-30 13:02:17 +00:00
yuzubot  60ae143f02  "Merge Tagged PR 7805"    2022-01-30 13:02:17 +00:00
21 changed files with 221 additions and 25 deletions

View File

@@ -8,7 +8,7 @@ steps:
displayName: 'Install vulkan-sdk'
- script: python -m pip install --upgrade pip conan
displayName: 'Install conan'
-- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_BUNDLED_SDL2=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} -DCMAKE_BUILD_TYPE=Release .. && cd ..
+- script: refreshenv && mkdir build && cd build && cmake -E env CXXFLAGS="/Gw /GA /Gr /Ob2" cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON -DCMAKE_POLICY_DEFAULT_CMP0069=NEW -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_BUNDLED_SDL2=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} -DCMAKE_BUILD_TYPE=Release .. && cd ..
displayName: 'Configure CMake'
- task: MSBuild@1
displayName: 'Build'

View File

@@ -37,7 +37,8 @@ if (MSVC)
add_compile_options(
/MP
/Zi
-/Zm200
+/Zm300
+/Zf
/Zo
/permissive-
/EHsc
@@ -81,7 +82,7 @@ if (MSVC)
add_compile_options("$<$<CONFIG:Release>:/GS->")
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG /MANIFEST:NO" CACHE STRING "" FORCE)
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
else()
add_compile_options(
-Wall

View File

@@ -87,6 +87,14 @@ static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) {
}
void Stream::PlayNextBuffer(std::chrono::nanoseconds ns_late) {
+auto now = std::chrono::steady_clock::now();
+auto duration = now.time_since_epoch();
+auto nanoseconds = std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
+if (nanoseconds > expected_cb_time) {
+ns_late = nanoseconds - expected_cb_time;
+}
if (!IsPlaying()) {
// Ensure we are in playing state before playing the next buffer
sink_stream.Flush();
@@ -121,6 +129,7 @@ void Stream::PlayNextBuffer(std::chrono::nanoseconds ns_late) {
ns_late = {};
}
+expected_cb_time = nanoseconds + (buffer_release_ns - ns_late);
core_timing.ScheduleEvent(buffer_release_ns - ns_late, release_event, {});
}
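Note: the added lines above measure how late core timing fired the release callback (current steady_clock time minus the previously estimated expected_cb_time) and shorten the next scheduled delay by that amount, so buffer playback does not drift. A minimal standalone sketch of the same idea, with illustrative names only (this is not yuzu's actual API):

    #include <chrono>
    #include <cstdio>

    int main() {
        using namespace std::chrono;
        const nanoseconds buffer_release_ns{5'000'000}; // nominal buffer duration
        nanoseconds expected_cb_time =
            duration_cast<nanoseconds>(steady_clock::now().time_since_epoch());
        for (int i = 0; i < 3; ++i) {
            const nanoseconds now =
                duration_cast<nanoseconds>(steady_clock::now().time_since_epoch());
            nanoseconds ns_late{};
            if (now > expected_cb_time) {
                ns_late = now - expected_cb_time; // how late this callback fired
            }
            const nanoseconds next_delay = buffer_release_ns - ns_late;
            expected_cb_time = now + next_delay; // estimate for the next callback
            std::printf("late %lld ns, next in %lld ns\n",
                        static_cast<long long>(ns_late.count()),
                        static_cast<long long>(next_delay.count()));
        }
    }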

View File

@@ -117,13 +117,14 @@ private:
ReleaseCallback release_callback; ///< Buffer release callback for the stream
State state{State::Stopped}; ///< Playback state of the stream
std::shared_ptr<Core::Timing::EventType>
-release_event; ///< Core timing release event for the stream
-BufferPtr active_buffer; ///< Actively playing buffer in the stream
-std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream
-std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream
-SinkStream& sink_stream; ///< Output sink for the stream
-Core::Timing::CoreTiming& core_timing; ///< Core timing instance.
-std::string name; ///< Name of the stream, must be unique
+release_event; ///< Core timing release event for the stream
+BufferPtr active_buffer; ///< Actively playing buffer in the stream
+std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream
+std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream
+SinkStream& sink_stream; ///< Output sink for the stream
+Core::Timing::CoreTiming& core_timing; ///< Core timing instance.
+std::string name; ///< Name of the stream, must be unique
+std::chrono::nanoseconds expected_cb_time = {}; ///< Estimated time of next callback
};
using StreamPtr = std::shared_ptr<Stream>;

View File

@@ -32,14 +32,14 @@ assert_noinline_call(const Fn& fn) {
#define ASSERT(_a_) \
do \
-if (!(_a_)) { \
+if (!(_a_)) [[unlikely]] { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
while (0)
#define ASSERT_MSG(_a_, ...) \
do \
-if (!(_a_)) { \
+if (!(_a_)) [[unlikely]] { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
} \
while (0)
@@ -70,7 +70,7 @@ assert_noinline_call(const Fn& fn) {
#define ASSERT_OR_EXECUTE(_a_, _b_) \
do { \
ASSERT(_a_); \
-if (!(_a_)) { \
+if (!(_a_)) [[unlikely]] { \
_b_ \
} \
} while (0)
@@ -79,7 +79,7 @@ assert_noinline_call(const Fn& fn) {
#define ASSERT_OR_EXECUTE_MSG(_a_, _b_, ...) \
do { \
ASSERT_MSG(_a_, __VA_ARGS__); \
-if (!(_a_)) { \
+if (!(_a_)) [[unlikely]] { \
_b_ \
} \
} while (0)
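Note: [[unlikely]] is a standard C++20 attribute that marks the assertion-failure branch as cold, letting the compiler lay out the hot path first; it is only a codegen hint and does not change behavior. A self-contained sketch of the same pattern:

    #include <cstdio>

    // The failure branch is annotated as unlikely, mirroring the macro change above.
    static int checked_divide(int num, int den) {
        if (den == 0) [[unlikely]] {
            std::fputs("Assertion Failed: division by zero\n", stderr);
            return 0;
        }
        return num / den;
    }

    int main() {
        std::printf("%d\n", checked_divide(10, 2));
    }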

View File

@@ -131,6 +131,8 @@ public:
void DownloadMemory(VAddr cpu_addr, u64 size);
+bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer);
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
void DisableGraphicsUniformBuffer(size_t stage, u32 index);
@@ -808,6 +810,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
return;
}
MICROPROFILE_SCOPE(GPU_DownloadMemory);
+const bool is_accuracy_normal =
+Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal;
boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
u64 total_size_bytes = 0;
@@ -819,6 +823,9 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
ForEachBufferInRange(cpu_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
buffer.ForEachDownloadRangeAndClear(
cpu_addr, size, [&](u64 range_offset, u64 range_size) {
+if (is_accuracy_normal) {
+return;
+}
const VAddr buffer_addr = buffer.CpuAddr();
const auto add_download = [&](VAddr start, VAddr end) {
const u64 new_offset = start - buffer_addr;
@@ -1417,10 +1424,8 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
-const bool is_accuracy_high =
-Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::High;
const bool is_async = Settings::values.use_asynchronous_gpu_emulation.GetValue();
-if (!is_async && !is_accuracy_high) {
+if (!is_async) {
return;
}
uncommitted_ranges.add(base_interval);
@@ -1643,6 +1648,41 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes,
runtime.CopyBuffer(buffer, upload_staging.buffer, copies);
}
+template <class P>
+bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
+std::span<u8> inlined_buffer) {
+const bool is_dirty = IsRegionRegistered(dest_address, copy_size);
+if (!is_dirty) {
+return false;
+}
+if (!IsRegionGpuModified(dest_address, copy_size)) {
+return false;
+}
+const IntervalType subtract_interval{dest_address, dest_address + copy_size};
+ClearDownload(subtract_interval);
+BufferId buffer_id = FindBuffer(dest_address, static_cast<u32>(copy_size));
+auto& buffer = slot_buffers[buffer_id];
+SynchronizeBuffer(buffer, dest_address, static_cast<u32>(copy_size));
+if constexpr (USE_MEMORY_MAPS) {
+std::array copies{BufferCopy{
+.src_offset = 0,
+.dst_offset = buffer.Offset(dest_address),
+.size = copy_size,
+}};
+auto upload_staging = runtime.UploadStagingBuffer(copy_size);
+u8* const src_pointer = upload_staging.mapped_span.data();
+std::memcpy(src_pointer, inlined_buffer.data(), copy_size);
+runtime.CopyBuffer(buffer, upload_staging.buffer, copies);
+} else {
+buffer.ImmediateUpload(buffer.Offset(dest_address), inlined_buffer);
+}
+return true;
+}
template <class P>
void BufferCache<P>::DownloadBufferMemory(Buffer& buffer) {
DownloadBufferMemory(buffer, buffer.CpuAddr(), buffer.SizeBytes());
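Note: InlineMemory above only takes over when the destination region is both registered and GPU-modified; otherwise it returns false and the caller falls back to a plain CPU-side write. The split on USE_MEMORY_MAPS is a compile-time backend dispatch via if constexpr; a minimal sketch of that technique, with hypothetical types (not yuzu's):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>
    #include <span>
    #include <vector>

    // A backend trait selects between a staging-buffer copy and an immediate
    // upload; the unused branch is discarded at compile time.
    template <class P>
    void UploadInline(std::vector<unsigned char>& buffer, std::size_t offset,
                      std::span<const unsigned char> data) {
        if constexpr (P::USE_MEMORY_MAPS) {
            // Map-style path: stage the data first, then "copy" it over.
            std::vector<unsigned char> staging(data.size());
            std::memcpy(staging.data(), data.data(), data.size());
            std::memcpy(buffer.data() + offset, staging.data(), staging.size());
        } else {
            // Immediate path: write straight into the destination.
            std::memcpy(buffer.data() + offset, data.data(), data.size());
        }
    }

    struct MappedBackend { static constexpr bool USE_MEMORY_MAPS = true; };

    int main() {
        std::vector<unsigned char> buffer(16, 0);
        const unsigned char payload[] = {1, 2, 3, 4};
        UploadInline<MappedBackend>(buffer, 4, payload);
        std::printf("%d %d\n", buffer[4], buffer[7]);
    }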

View File

@@ -7,6 +7,7 @@
#include "common/assert.h"
#include "video_core/engines/engine_upload.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/textures/decoders.h"
namespace Tegra::Engines::Upload {
@@ -16,6 +17,10 @@ State::State(MemoryManager& memory_manager_, Registers& regs_)
State::~State() = default;
+void State::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
+rasterizer = rasterizer_;
+}
void State::ProcessExec(const bool is_linear_) {
write_offset = 0;
copy_size = regs.line_length_in * regs.line_count;
@@ -32,7 +37,7 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
}
const GPUVAddr address{regs.dest.Address()};
if (is_linear) {
-memory_manager.WriteBlock(address, inner_buffer.data(), copy_size);
+rasterizer->AccelerateInline2Memory(address, copy_size, inner_buffer);
} else {
UNIMPLEMENTED_IF(regs.dest.z != 0);
UNIMPLEMENTED_IF(regs.dest.depth != 1);
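Note: with this change, linear Inline2Memory (I2M) uploads are handed to the bound rasterizer instead of being written straight through the GPU MemoryManager, so the host caches can react to the write. A toy model of the exec/data flow, with hypothetical names (not yuzu's actual classes):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <span>
    #include <vector>

    // ProcessExec latches the transfer size, ProcessData accumulates 32-bit
    // words, and the last word hands the finished linear buffer to a
    // rasterizer hook rather than writing guest memory directly.
    struct ToyUploadState {
        std::vector<std::uint8_t> inner_buffer;
        std::uint32_t write_offset = 0;
        std::uint32_t copy_size = 0;

        void ProcessExec(std::uint32_t line_length, std::uint32_t line_count) {
            write_offset = 0;
            copy_size = line_length * line_count;
            inner_buffer.resize(copy_size);
        }

        template <class Rasterizer>
        void ProcessData(std::uint32_t data, bool is_last_call, Rasterizer&& rasterizer) {
            const std::uint32_t sub_copy = std::min<std::uint32_t>(4, copy_size - write_offset);
            std::memcpy(inner_buffer.data() + write_offset, &data, sub_copy);
            write_offset += sub_copy;
            if (is_last_call) {
                // Stand-in for rasterizer->AccelerateInline2Memory(...).
                rasterizer(std::span<std::uint8_t>(inner_buffer));
            }
        }
    };

    int main() {
        ToyUploadState state;
        state.ProcessExec(8, 1); // 8 bytes, one line
        const auto sink = [](std::span<std::uint8_t> s) {
            std::printf("dispatched %zu bytes\n", s.size());
        };
        state.ProcessData(0x03020100u, false, sink);
        state.ProcessData(0x07060504u, true, sink);
    }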

View File

@@ -12,6 +12,10 @@ namespace Tegra {
class MemoryManager;
}
+namespace VideoCore {
+class RasterizerInterface;
+}
namespace Tegra::Engines::Upload {
struct Registers {
@@ -60,6 +64,9 @@ public:
void ProcessExec(bool is_linear_);
void ProcessData(u32 data, bool is_last_call);
+/// Binds a rasterizer to this engine.
+void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
private:
u32 write_offset = 0;
u32 copy_size = 0;
@@ -68,6 +75,7 @@ private:
bool is_linear = false;
Registers& regs;
MemoryManager& memory_manager;
+VideoCore::RasterizerInterface* rasterizer = nullptr;
};
} // namespace Tegra::Engines::Upload

View File

@@ -22,6 +22,7 @@ KeplerCompute::~KeplerCompute() = default;
void KeplerCompute::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
rasterizer = rasterizer_;
+upload_state.BindRasterizer(rasterizer);
}
void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_call) {

View File

@@ -19,6 +19,10 @@ KeplerMemory::KeplerMemory(Core::System& system_, MemoryManager& memory_manager)
KeplerMemory::~KeplerMemory() = default;
+void KeplerMemory::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
+upload_state.BindRasterizer(rasterizer_);
+}
void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
ASSERT_MSG(method < Regs::NUM_REGS,
"Invalid KeplerMemory register, increase the size of the Regs structure");

View File

@@ -22,6 +22,10 @@ namespace Tegra {
class MemoryManager;
}
+namespace VideoCore {
+class RasterizerInterface;
+}
namespace Tegra::Engines {
/**
@@ -38,6 +42,9 @@ public:
explicit KeplerMemory(Core::System& system_, MemoryManager& memory_manager);
~KeplerMemory() override;
+/// Binds a rasterizer to this engine.
+void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
/// Write the value to the register identified by method.
void CallMethod(u32 method, u32 method_argument, bool is_last_call) override;

View File

@@ -31,6 +31,7 @@ Maxwell3D::~Maxwell3D() = default;
void Maxwell3D::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
rasterizer = rasterizer_;
+upload_state.BindRasterizer(rasterizer_);
}
void Maxwell3D::InitializeRegisterDefaults() {

View File

@@ -1557,7 +1557,8 @@ private:
static constexpr u32 null_cb_data = 0xFFFFFFFF;
struct CBDataState {
-std::array<std::array<u32, 0x4000>, 16> buffer;
+static constexpr size_t inline_size = 0x8000;
+std::array<std::array<u32, inline_size>, 16> buffer;
u32 current{null_cb_data};
u32 id{null_cb_data};
u32 start_pos{};

View File

@@ -59,6 +59,7 @@ struct GPU::Impl {
maxwell_3d->BindRasterizer(rasterizer);
fermi_2d->BindRasterizer(rasterizer);
kepler_compute->BindRasterizer(rasterizer);
+kepler_memory->BindRasterizer(rasterizer);
maxwell_dma->BindRasterizer(rasterizer);
}
@@ -502,8 +503,15 @@ struct GPU::Impl {
case BufferMethods::SemaphoreAddressHigh:
case BufferMethods::SemaphoreAddressLow:
case BufferMethods::SemaphoreSequence:
-case BufferMethods::UnkCacheFlush:
-case BufferMethods::WrcacheFlush:
-break;
+case BufferMethods::UnkCacheFlush: {
+rasterizer->SyncGuestHost();
+break;
+}
+case BufferMethods::WrcacheFlush: {
+rasterizer->SignalReference();
+break;
+}
case BufferMethods::FenceValue:
break;
case BufferMethods::RefCnt:
@@ -513,7 +521,7 @@ struct GPU::Impl {
ProcessFenceActionMethod();
break;
case BufferMethods::WaitForInterrupt:
-ProcessWaitForInterruptMethod();
+rasterizer->WaitForIdle();
break;
case BufferMethods::SemaphoreTrigger: {
ProcessSemaphoreTriggerMethod();
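Note: UnkCacheFlush and WrcacheFlush previously fell through to a bare break and were ignored; they now forward to the rasterizer, as does WaitForInterrupt. A condensed model of that dispatch, with stand-in classes (the method names mirror the diff, the rest is illustrative):

    #include <cstdio>

    enum class BufferMethods { UnkCacheFlush, WrcacheFlush, WaitForInterrupt, RefCnt };

    struct ToyRasterizer {
        void SyncGuestHost() { std::puts("SyncGuestHost"); }
        void SignalReference() { std::puts("SignalReference"); }
        void WaitForIdle() { std::puts("WaitForIdle"); }
    };

    // Each puller method now maps to an explicit rasterizer synchronization call.
    static void ProcessBindMethod(ToyRasterizer& rasterizer, BufferMethods method) {
        switch (method) {
        case BufferMethods::UnkCacheFlush:
            rasterizer.SyncGuestHost();
            break;
        case BufferMethods::WrcacheFlush:
            rasterizer.SignalReference();
            break;
        case BufferMethods::WaitForInterrupt:
            rasterizer.WaitForIdle();
            break;
        default:
            break;
        }
    }

    int main() {
        ToyRasterizer rasterizer;
        ProcessBindMethod(rasterizer, BufferMethods::WrcacheFlush);
    }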

View File

@@ -4,6 +4,8 @@
#include <array>
#include <vector>
#include "common/scope_exit.h"
#include "video_core/dirty_flags.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/macro/macro.h"
#include "video_core/macro/macro_hle.h"
@@ -59,6 +61,7 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
maxwell3d.regs.index_array.first = parameters[3];
maxwell3d.regs.reg_array[0x446] = element_base; // vertex id base?
maxwell3d.regs.index_array.count = parameters[1];
+maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
maxwell3d.regs.vb_element_base = element_base;
maxwell3d.regs.vb_base_instance = base_instance;
maxwell3d.mme_draw.instance_count = instance_count;
@@ -81,10 +84,67 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
}
-constexpr std::array<std::pair<u64, HLEFunction>, 3> hle_funcs{{
+// Multidraw Indirect
+void HLE_3f5e74b9c9a50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
+SCOPE_EXIT({
+// Clean everything.
+maxwell3d.regs.reg_array[0x446] = 0x0; // vertex id base?
+maxwell3d.regs.index_array.count = 0;
+maxwell3d.regs.vb_element_base = 0x0;
+maxwell3d.regs.vb_base_instance = 0x0;
+maxwell3d.mme_draw.instance_count = 0;
+maxwell3d.CallMethodFromMME(0x8e3, 0x640);
+maxwell3d.CallMethodFromMME(0x8e4, 0x0);
+maxwell3d.CallMethodFromMME(0x8e5, 0x0);
+maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
+maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+});
+const u32 start_indirect = parameters[0];
+const u32 end_indirect = parameters[1];
+if (start_indirect >= end_indirect) {
+// Nothing to do.
+return;
+}
+const auto topology =
+static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[2]);
+maxwell3d.regs.draw.topology.Assign(topology);
+const u32 padding = parameters[3];
+const std::size_t max_draws = parameters[4];
+const u32 indirect_words = 5 + padding;
+const std::size_t first_draw = start_indirect;
+const std::size_t effective_draws = end_indirect - start_indirect;
+const std::size_t last_draw = start_indirect + std::min(effective_draws, max_draws);
+for (std::size_t index = first_draw; index < last_draw; index++) {
+const std::size_t base = index * indirect_words + 5;
+const u32 num_vertices = parameters[base];
+const u32 instance_count = parameters[base + 1];
+const u32 first_index = parameters[base + 2];
+const u32 base_vertex = parameters[base + 3];
+const u32 base_instance = parameters[base + 4];
+maxwell3d.regs.index_array.first = first_index;
+maxwell3d.regs.reg_array[0x446] = base_vertex;
+maxwell3d.regs.index_array.count = num_vertices;
+maxwell3d.regs.vb_element_base = base_vertex;
+maxwell3d.regs.vb_base_instance = base_instance;
+maxwell3d.mme_draw.instance_count = instance_count;
+maxwell3d.CallMethodFromMME(0x8e3, 0x640);
+maxwell3d.CallMethodFromMME(0x8e4, base_vertex);
+maxwell3d.CallMethodFromMME(0x8e5, base_instance);
+maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+if (maxwell3d.ShouldExecute()) {
+maxwell3d.Rasterizer().Draw(true, true);
+}
+maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
+}
+}
+constexpr std::array<std::pair<u64, HLEFunction>, 4> hle_funcs{{
{0x771BB18C62444DA0, &HLE_771BB18C62444DA0},
{0x0D61FC9FAAC9FCAD, &HLE_0D61FC9FAAC9FCAD},
{0x0217920100488FF7, &HLE_0217920100488FF7},
+{0x3f5e74b9c9a50164, &HLE_3f5e74b9c9a50164},
}};
class HLEMacroImpl final : public CachedMacro {
@@ -100,6 +160,7 @@ private:
Engines::Maxwell3D& maxwell3d;
HLEFunction func;
};
} // Anonymous namespace
HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {}
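Note: the hle_funcs table above maps a 64-bit key (a hash of the macro's code) to a native handler; on a match, the handler runs in place of interpreting or JITting the macro. A minimal sketch of that lookup, with illustrative names (not yuzu's actual types):

    #include <array>
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using ToyHLEFunction = void (*)(const std::vector<std::uint32_t>& parameters);

    static void ToyMultiDrawIndirect(const std::vector<std::uint32_t>& parameters) {
        std::printf("HLE multidraw: %u draws\n", parameters.empty() ? 0u : parameters[0]);
    }

    // Compile-time table of (code hash, native handler) pairs.
    constexpr std::array<std::pair<std::uint64_t, ToyHLEFunction>, 1> toy_funcs{{
        {0x3f5e74b9c9a50164ULL, &ToyMultiDrawIndirect},
    }};

    static ToyHLEFunction Lookup(std::uint64_t hash) {
        for (const auto& [key, func] : toy_funcs) {
            if (key == hash) {
                return func; // run natively instead of executing the macro
            }
        }
        return nullptr; // fall back to the macro engine
    }

    int main() {
        if (auto* func = Lookup(0x3f5e74b9c9a50164ULL)) {
            func({2});
        }
    }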

View File

@@ -143,6 +143,8 @@ public:
[[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align);
void Unmap(GPUVAddr gpu_addr, std::size_t size);
+void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
private:
[[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
@@ -153,8 +155,6 @@ private:
void TryLockPage(PageEntry page_entry, std::size_t size);
void TryUnlockPage(PageEntry page_entry, std::size_t size);
-void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
bool is_safe) const;
void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,

View File

@@ -123,6 +123,9 @@ public:
[[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0;
+virtual void AccelerateInline2Memory(GPUVAddr address, size_t copy_size,
+std::span<u8> memory) = 0;
/// Attempt to use a faster method to display the framebuffer to screen
[[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config,
VAddr framebuffer_addr, u32 pixel_stride) {

View File

@@ -484,6 +484,28 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA()
return accelerate_dma;
}
+void RasterizerOpenGL::AccelerateInline2Memory(GPUVAddr address, size_t copy_size,
+std::span<u8> memory) {
+auto cpu_addr = gpu_memory.GpuToCpuAddress(address);
+if (!cpu_addr) [[unlikely]] {
+gpu_memory.WriteBlock(address, memory.data(), copy_size);
+return;
+}
+gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size);
+{
+std::unique_lock<std::mutex> lock{buffer_cache.mutex};
+if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
+buffer_cache.WriteMemory(*cpu_addr, copy_size);
+}
+}
+{
+std::scoped_lock lock_texture{texture_cache.mutex};
+texture_cache.WriteMemory(*cpu_addr, copy_size);
+}
+shader_cache.InvalidateRegion(*cpu_addr, copy_size);
+query_cache.InvalidateRegion(*cpu_addr, copy_size);
+}
bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
VAddr framebuffer_addr, u32 pixel_stride) {
if (framebuffer_addr == 0) {
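Note on the design above: when the GPU address resolves to a CPU address, the data is written with WriteBlockUnsafe (skipping per-page change tracking) and each cache is then told explicitly; the buffer cache either ingests the data via InlineMemory or is marked dirty with WriteMemory, the texture cache is marked over the same range, and the shader and query caches are invalidated. The safe WriteBlock path remains as the fallback for unmapped addresses. The Vulkan rasterizer below follows the same pattern, with the pipeline cache in place of the shader cache.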

View File

@@ -106,6 +106,7 @@ public:
const Tegra::Engines::Fermi2D::Surface& dst,
const Tegra::Engines::Fermi2D::Config& copy_config) override;
Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
+void AccelerateInline2Memory(GPUVAddr address, size_t copy_size, std::span<u8> memory) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,

View File

@@ -548,6 +548,28 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA()
return accelerate_dma;
}
+void RasterizerVulkan::AccelerateInline2Memory(GPUVAddr address, size_t copy_size,
+std::span<u8> memory) {
+auto cpu_addr = gpu_memory.GpuToCpuAddress(address);
+if (!cpu_addr) [[unlikely]] {
+gpu_memory.WriteBlock(address, memory.data(), copy_size);
+return;
+}
+gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size);
+{
+std::unique_lock<std::mutex> lock{buffer_cache.mutex};
+if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
+buffer_cache.WriteMemory(*cpu_addr, copy_size);
+}
+}
+{
+std::scoped_lock lock_texture{texture_cache.mutex};
+texture_cache.WriteMemory(*cpu_addr, copy_size);
+}
+pipeline_cache.InvalidateRegion(*cpu_addr, copy_size);
+query_cache.InvalidateRegion(*cpu_addr, copy_size);
+}
bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
VAddr framebuffer_addr, u32 pixel_stride) {
if (!framebuffer_addr) {

View File

@@ -99,6 +99,7 @@ public:
const Tegra::Engines::Fermi2D::Surface& dst,
const Tegra::Engines::Fermi2D::Config& copy_config) override;
Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
+void AccelerateInline2Memory(GPUVAddr address, size_t copy_size, std::span<u8> memory) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,