Compare commits

7 commits: mainline-0 ... mainline-0

| Author | SHA1 | Date |
|---|---|---|
|  | a45ae9d0f2 |  |
|  | d8d4949b2c |  |
|  | 64c1cf621b |  |
|  | 029da3a591 |  |
|  | 8b50fcac94 |  |
|  | 3716f43e0b |  |
|  | 0f3cbfe86e |  |
```diff
@@ -8,7 +8,7 @@ steps:
     displayName: 'Install vulkan-sdk'
   - script: python -m pip install --upgrade pip conan
     displayName: 'Install conan'
-  - script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_BUNDLED_SDL2=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} -DCMAKE_BUILD_TYPE=Release .. && cd ..
+  - script: refreshenv && mkdir build && cd build && cmake -E env CXXFLAGS="/Gw /GA /Gr /Ob2" cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON -DCMAKE_POLICY_DEFAULT_CMP0069=NEW -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_BUNDLED_SDL2=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} -DCMAKE_BUILD_TYPE=Release .. && cd ..
     displayName: 'Configure CMake'
   - task: MSBuild@1
     displayName: 'Build'
```
```diff
@@ -37,7 +37,8 @@ if (MSVC)
     add_compile_options(
         /MP
         /Zi
-        /Zm200
+        /Zm300
+        /Zf
         /Zo
         /permissive-
         /EHsc
@@ -81,7 +82,7 @@ if (MSVC)
     add_compile_options("$<$<CONFIG:Release>:/GS->")

     set(CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG /MANIFEST:NO" CACHE STRING "" FORCE)
-    set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
+    set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
 else()
     add_compile_options(
         -Wall
```
```diff
@@ -87,6 +87,14 @@ static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) {
 }

 void Stream::PlayNextBuffer(std::chrono::nanoseconds ns_late) {
+    auto now = std::chrono::steady_clock::now();
+    auto duration = now.time_since_epoch();
+    auto nanoseconds = std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
+
+    if (nanoseconds > expected_cb_time) {
+        ns_late = nanoseconds - expected_cb_time;
+    }
+
     if (!IsPlaying()) {
         // Ensure we are in playing state before playing the next buffer
         sink_stream.Flush();
@@ -121,6 +129,7 @@ void Stream::PlayNextBuffer(std::chrono::nanoseconds ns_late) {
         ns_late = {};
     }

+    expected_cb_time = nanoseconds + (buffer_release_ns - ns_late);
     core_timing.ScheduleEvent(buffer_release_ns - ns_late, release_event, {});
 }
```
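The added lines compare the measured callback time against a stored `expected_cb_time` and derive the real lateness, rather than trusting the `ns_late` the scheduler reports. A minimal self-contained sketch of that compensation arithmetic (all names here are illustrative, not yuzu's):

```cpp
// Sketch of the drift compensation introduced above: measure how late the
// callback actually fired against the predicted time, then fold that into
// the next scheduling delay so the audio cadence does not drift.
#include <chrono>
#include <iostream>

using namespace std::chrono;

int main() {
    nanoseconds expected_cb_time{1'000'000};      // predicted callback time
    const nanoseconds buffer_release_ns{500'000}; // nominal per-buffer interval

    // Pretend the callback fired 30 microseconds late.
    nanoseconds now = expected_cb_time + microseconds{30};

    nanoseconds ns_late{};
    if (now > expected_cb_time) {
        ns_late = now - expected_cb_time; // actual measured lateness
    }

    // Schedule the next callback sooner by the measured lateness, and record
    // when we now expect it to fire (same formula as the diff).
    const nanoseconds next_delay = buffer_release_ns - ns_late;
    expected_cb_time = now + next_delay;

    std::cout << "late by " << ns_late.count() << " ns, next delay "
              << next_delay.count() << " ns\n";
}
```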
```diff
@@ -117,13 +117,14 @@ private:
     ReleaseCallback release_callback; ///< Buffer release callback for the stream
     State state{State::Stopped};      ///< Playback state of the stream
     std::shared_ptr<Core::Timing::EventType>
         release_event;                        ///< Core timing release event for the stream
     BufferPtr active_buffer;                  ///< Actively playing buffer in the stream
     std::queue<BufferPtr> queued_buffers;     ///< Buffers queued to be played in the stream
     std::queue<BufferPtr> released_buffers;   ///< Buffers recently released from the stream
     SinkStream& sink_stream;                  ///< Output sink for the stream
     Core::Timing::CoreTiming& core_timing;    ///< Core timing instance.
     std::string name;                         ///< Name of the stream, must be unique
+    std::chrono::nanoseconds expected_cb_time = {}; ///< Estimated time of next callback
 };

 using StreamPtr = std::shared_ptr<Stream>;
```
```diff
@@ -32,14 +32,14 @@ assert_noinline_call(const Fn& fn) {

 #define ASSERT(_a_) \
     do \
-        if (!(_a_)) { \
+        if (!(_a_)) [[unlikely]] { \
             assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
         } \
     while (0)

 #define ASSERT_MSG(_a_, ...) \
     do \
-        if (!(_a_)) { \
+        if (!(_a_)) [[unlikely]] { \
             assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
         } \
     while (0)
@@ -70,7 +70,7 @@ assert_noinline_call(const Fn& fn) {
 #define ASSERT_OR_EXECUTE(_a_, _b_) \
     do { \
         ASSERT(_a_); \
-        if (!(_a_)) { \
+        if (!(_a_)) [[unlikely]] { \
             _b_ \
         } \
     } while (0)
@@ -79,7 +79,7 @@ assert_noinline_call(const Fn& fn) {
 #define ASSERT_OR_EXECUTE_MSG(_a_, _b_, ...) \
     do { \
         ASSERT_MSG(_a_, __VA_ARGS__); \
-        if (!(_a_)) { \
+        if (!(_a_)) [[unlikely]] { \
             _b_ \
         } \
     } while (0)
```
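These hunks annotate every failure branch with C++20's `[[unlikely]]`, telling the compiler to treat the assertion-failed path as cold. A minimal sketch of the pattern outside yuzu's macro machinery (`check_handle` and `fail_slow_path` are hypothetical names):

```cpp
// Minimal standalone sketch of the [[unlikely]] pattern used above (C++20).
#include <cstdio>
#include <cstdlib>

[[noreturn]] static void fail_slow_path(const char* msg) {
    // The cold path. The real macros route this through assert_noinline_call
    // for the same reason: keep failure handling out of the hot caller.
    std::fprintf(stderr, "Assertion Failed! %s\n", msg);
    std::abort();
}

int check_handle(int handle) {
    if (handle < 0) [[unlikely]] {
        // The compiler lays this block out as cold code and optimizes the
        // fall-through (valid-handle) path for branch layout and icache use.
        fail_slow_path("invalid handle");
    }
    return handle * 2;
}

int main() {
    return check_handle(21) == 42 ? 0 : 1;
}
```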
```diff
@@ -285,72 +285,142 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
     return ResultSuccess;
 }

-ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+    // Validate the mapping request.
+    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+             ResultInvalidMemoryRegion);
+
+    // Lock the table.
     KScopedLightLock lk(general_lock);

-    const std::size_t num_pages{size / PageSize};
-
-    KMemoryState state{};
-    KMemoryPermission perm{};
-    CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, nullptr, src_addr, size,
-                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
-                                  KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-
-    if (IsRegionMapped(dst_addr, size)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    KPageLinkedList page_linked_list;
-    AddRegionToPages(src_addr, num_pages, page_linked_list);
+    // Verify that the source memory is normal heap.
+    KMemoryState src_state{};
+    KMemoryPermission src_perm{};
+    std::size_t num_src_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr,
+                                 std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
+                                 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+                                 KMemoryAttribute::None));
+
+    // Verify that the destination memory is unmapped.
+    std::size_t num_dst_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size,
+                                 KMemoryState::All, KMemoryState::Free, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::None,
+                                 KMemoryAttribute::None));

+    // Map the code memory.
     {
-        auto block_guard = detail::ScopeExit(
-            [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); });
-
-        CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
-                             OperationType::ChangePermissions));
-        CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::None));
-
-        block_guard.Cancel();
+        // Determine the number of pages being operated on.
+        const std::size_t num_pages = size / PageSize;
+
+        // Create page groups for the memory being mapped.
+        KPageLinkedList pg;
+        AddRegionToPages(src_address, num_pages, pg);
+
+        // Reprotect the source as kernel-read/not mapped.
+        const KMemoryPermission new_perm = static_cast<KMemoryPermission>(
+            KMemoryPermission::KernelRead | KMemoryPermission::NotMapped);
+        R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
+
+        // Ensure that we unprotect the source pages on failure.
+        auto unprot_guard = SCOPE_GUARD({
+            ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
+                       .IsSuccess());
+        });
+
+        // Map the alias pages.
+        R_TRY(MapPages(dst_address, pg, new_perm));
+
+        // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
+        // failure.
+        unprot_guard.Cancel();
+
+        // Apply the memory block updates.
+        block_manager->Update(src_address, num_pages, src_state, new_perm,
+                              KMemoryAttribute::Locked);
+        block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
+                              KMemoryAttribute::None);
     }

-    block_manager->Update(src_addr, num_pages, state, KMemoryPermission::None,
-                          KMemoryAttribute::Locked);
-    block_manager->Update(dst_addr, num_pages, KMemoryState::AliasCode);
-
     return ResultSuccess;
 }

-ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+    // Validate the mapping request.
+    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+             ResultInvalidMemoryRegion);
+
+    // Lock the table.
     KScopedLightLock lk(general_lock);

-    if (!size) {
-        return ResultSuccess;
-    }
-
-    const std::size_t num_pages{size / PageSize};
-
-    CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, nullptr, src_addr, size,
-                                  KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
-
-    KMemoryState state{};
-    CASCADE_CODE(CheckMemoryState(
-        &state, nullptr, nullptr, nullptr, dst_addr, PageSize, KMemoryState::FlagCanCodeAlias,
-        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
-    CASCADE_CODE(CheckMemoryState(dst_addr, size, KMemoryState::All, state, KMemoryPermission::None,
-                                  KMemoryPermission::None, KMemoryAttribute::Mask,
-                                  KMemoryAttribute::None));
-    CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
-
-    block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
-    block_manager->Update(src_addr, num_pages, KMemoryState::Normal,
-                          KMemoryPermission::UserReadWrite);
-
-    system.InvalidateCpuInstructionCacheRange(dst_addr, size);
+    // Verify that the source memory is locked normal heap.
+    std::size_t num_src_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
+                                 KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
+                                 KMemoryPermission::None, KMemoryAttribute::All,
+                                 KMemoryAttribute::Locked));
+
+    // Verify that the destination memory is aliasable code.
+    std::size_t num_dst_allocator_blocks{};
+    R_TRY(this->CheckMemoryStateContiguous(
+        std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
+        KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Determine whether any pages being unmapped are code.
+    bool any_code_pages = false;
+    {
+        KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
+        while (true) {
+            // Get the memory info.
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            // Check if the memory has code flag.
+            if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
+                any_code_pages = true;
+                break;
+            }
+
+            // Check if we're done.
+            if (dst_address + size - 1 <= info.GetLastAddress()) {
+                break;
+            }
+
+            // Advance.
+            ++it;
+        }
+    }
+
+    // Ensure that we maintain the instruction cache.
+    bool reprotected_pages = false;
+    SCOPE_EXIT({
+        if (reprotected_pages && any_code_pages) {
+            system.InvalidateCpuInstructionCacheRange(dst_address, size);
+        }
+    });
+
+    // Unmap.
+    {
+        // Determine the number of pages being operated on.
+        const std::size_t num_pages = size / PageSize;
+
+        // Unmap the aliased copy of the pages.
+        R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+
+        // Try to set the permissions for the source pages back to what they should be.
+        R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
+                      OperationType::ChangePermissions));
+
+        // Apply the memory block updates.
+        block_manager->Update(dst_address, num_pages, KMemoryState::None);
+        block_manager->Update(src_address, num_pages, KMemoryState::Normal,
+                              KMemoryPermission::UserReadWrite);
+
+        // Note that we reprotected pages.
+        reprotected_pages = true;
+    }

     return ResultSuccess;
 }
```
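The rewritten `MapCodeMemory` hinges on a cancelable scope guard: the source pages are reprotected first, a rollback is armed, and the guard is canceled only once the alias mapping succeeds. A generic sketch of that idiom under assumed names (yuzu's actual `SCOPE_GUARD` macro wraps the same idea; this is not its implementation):

```cpp
// Generic sketch of the cancelable scope-guard rollback pattern.
#include <iostream>
#include <utility>

template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F f) : func{std::move(f)} {}
    ~ScopeGuard() {
        if (active) {
            func(); // rollback runs on every exit path unless canceled
        }
    }
    void Cancel() { active = false; }
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    F func;
    bool active = true;
};

bool MapAlias(bool fail_mapping) {
    // Step 1: reprotect the source (always succeeds in this toy example).
    std::cout << "reprotect source\n";

    // Step 2: arm the rollback before the fallible step.
    ScopeGuard guard{[] { std::cout << "rollback: restore source perms\n"; }};

    // Step 3: the fallible step; on failure the guard restores the source.
    if (fail_mapping) {
        return false;
    }

    // Step 4: success, so disarm the rollback.
    guard.Cancel();
    return true;
}

int main() {
    MapAlias(true);  // prints the rollback message
    MapAlias(false); // no rollback
}
```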
```diff
@@ -36,8 +36,8 @@ public:
                          KMemoryManager::Pool pool);
     ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
                               KMemoryPermission perm);
-    ResultCode MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
     ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
                                   VAddr src_addr);
     ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
@@ -253,7 +253,9 @@ public:
     constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
         return !IsOutsideASLRRegion(address, size);
     }
-
+    constexpr std::size_t GetNumGuardPages() const {
+        return IsKernel() ? 1 : 4;
+    }
     PAddr GetPhysicalAddr(VAddr addr) const {
         const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
@@ -275,10 +277,6 @@ private:
         return is_aslr_enabled;
    }

-    constexpr std::size_t GetNumGuardPages() const {
-        return IsKernel() ? 1 : 4;
-    }
-
     constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
         return (address_space_start <= addr) &&
                (num_pages <= (address_space_end - address_space_start) / PageSize) &&
```
```diff
@@ -288,7 +288,7 @@ public:
     }

     bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
-        constexpr std::size_t padding_size{4 * Kernel::PageSize};
+        const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
         const auto start_info{page_table.QueryInfo(start - 1)};

         if (start_info.state != Kernel::KMemoryState::Free) {
@@ -308,31 +308,69 @@ public:
         return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
     }

-    VAddr GetRandomMapRegion(const Kernel::KPageTable& page_table, std::size_t size) const {
-        VAddr addr{};
-        const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >>
-                                    Kernel::PageBits};
-        do {
-            addr = page_table.GetAliasCodeRegionStart() +
-                   (Kernel::KSystemControl::GenerateRandomRange(0, end_pages) << Kernel::PageBits);
-        } while (!page_table.IsInsideAddressSpace(addr, size) ||
-                 page_table.IsInsideHeapRegion(addr, size) ||
-                 page_table.IsInsideAliasRegion(addr, size));
-        return addr;
-    }
+    ResultCode GetAvailableMapRegion(Kernel::KPageTable& page_table, u64 size, VAddr& out_addr) {
+        size = Common::AlignUp(size, Kernel::PageSize);
+        size += page_table.GetNumGuardPages() * Kernel::PageSize * 4;
+
+        auto is_region_available = [&](VAddr addr) {
+            const auto end_addr = addr + size;
+            while (addr < end_addr) {
+                if (system.Memory().IsValidVirtualAddress(addr)) {
+                    return false;
+                }
+
+                if (!page_table.IsInsideAddressSpace(out_addr, size)) {
+                    return false;
+                }
+
+                if (page_table.IsInsideHeapRegion(out_addr, size)) {
+                    return false;
+                }
+
+                if (page_table.IsInsideAliasRegion(out_addr, size)) {
+                    return false;
+                }
+
+                addr += Kernel::PageSize;
+            }
+            return true;
+        };
+
+        bool succeeded = false;
+        const auto map_region_end =
+            page_table.GetAliasCodeRegionStart() + page_table.GetAliasCodeRegionSize();
+        while (current_map_addr < map_region_end) {
+            if (is_region_available(current_map_addr)) {
+                succeeded = true;
+                break;
+            }
+            current_map_addr += 0x100000;
+        }
+
+        if (!succeeded) {
+            UNREACHABLE_MSG("Out of address space!");
+            return Kernel::ResultOutOfMemory;
+        }
+
+        out_addr = current_map_addr;
+        current_map_addr += size;
+
+        return ResultSuccess;
+    }

-    ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr baseAddress,
-                                          u64 size) const {
-        for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
-            auto& page_table{process->PageTable()};
-            const VAddr addr{GetRandomMapRegion(page_table, size)};
-            const ResultCode result{page_table.MapCodeMemory(addr, baseAddress, size)};
+    ResultVal<VAddr> MapProcessCodeMemory(Kernel::KProcess* process, VAddr base_addr, u64 size) {
+        auto& page_table{process->PageTable()};
+        VAddr addr{};
+
+        for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
+            R_TRY(GetAvailableMapRegion(page_table, size, addr));
+
+            const ResultCode result{page_table.MapCodeMemory(addr, base_addr, size)};
             if (result == Kernel::ResultInvalidCurrentMemory) {
                 continue;
             }

-            CASCADE_CODE(result);
+            R_TRY(result);

             if (ValidateRegionForMap(page_table, addr, size)) {
                 return addr;
@@ -343,7 +381,7 @@ public:
     }

     ResultVal<VAddr> MapNro(Kernel::KProcess* process, VAddr nro_addr, std::size_t nro_size,
-                            VAddr bss_addr, std::size_t bss_size, std::size_t size) const {
+                            VAddr bss_addr, std::size_t bss_size, std::size_t size) {
         for (std::size_t retry = 0; retry < MAXIMUM_MAP_RETRIES; retry++) {
             auto& page_table{process->PageTable()};
             VAddr addr{};
@@ -597,6 +635,7 @@ public:
         LOG_WARNING(Service_LDR, "(STUBBED) called");

         initialized = true;
+        current_map_addr = system.CurrentProcess()->PageTable().GetAliasCodeRegionStart();

         IPC::ResponseBuilder rb{ctx, 2};
         rb.Push(ResultSuccess);
@@ -607,6 +646,7 @@ private:

     std::map<VAddr, NROInfo> nro;
     std::map<VAddr, std::vector<SHA256Hash>> nrr;
+    VAddr current_map_addr{};

     bool IsValidNROHash(const SHA256Hash& hash) const {
         return std::any_of(nrr.begin(), nrr.end(), [&hash](const auto& p) {
```
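`GetAvailableMapRegion` replaces random probing with a linear first-fit scan: a persistent cursor (`current_map_addr`) advances through the alias-code region in 1 MiB steps until a free, guard-padded region fits. A toy sketch of that cursor scan, with a set standing in for the real page-table queries (all names here are assumptions, not yuzu's):

```cpp
// Toy sketch of the cursor-based first-fit scan that replaces random probing.
#include <cstdint>
#include <iostream>
#include <optional>
#include <set>

constexpr std::uint64_t kPageSize = 0x1000;
constexpr std::uint64_t kStep = 0x100000; // 1 MiB, as in the diff

std::set<std::uint64_t> mapped_pages; // base addresses of occupied pages
std::uint64_t current_map_addr = 0;   // persists across allocations

bool IsRegionFree(std::uint64_t addr, std::uint64_t size) {
    for (std::uint64_t a = addr; a < addr + size; a += kPageSize) {
        if (mapped_pages.count(a) != 0) {
            return false;
        }
    }
    return true;
}

std::optional<std::uint64_t> GetAvailableMapRegion(std::uint64_t region_end,
                                                   std::uint64_t size) {
    while (current_map_addr < region_end) {
        if (IsRegionFree(current_map_addr, size)) {
            const std::uint64_t out = current_map_addr;
            current_map_addr += size; // next search resumes past this block
            return out;
        }
        current_map_addr += kStep; // skip ahead in coarse steps
    }
    return std::nullopt; // out of address space
}

int main() {
    mapped_pages.insert(0); // occupy the first page, forcing one skip
    if (auto addr = GetAvailableMapRegion(0x10000000, 4 * kPageSize)) {
        std::cout << std::hex << "mapped at 0x" << *addr << '\n';
    }
}
```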
```diff
@@ -207,7 +207,7 @@ private:
                        int index) const;

     mutable std::mutex mutex;
-    mutable std::mutex mutex_callback;
+    mutable std::recursive_mutex mutex_callback;
     bool configuring{false};
     const std::string input_engine;
     int last_callback_key = 0;
```
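Switching `mutex_callback` to `std::recursive_mutex` matters when a callback fired under the lock re-enters an API that acquires the same lock; with a plain `std::mutex` that self-deadlocks. A minimal sketch (the `Engine` class and its methods are hypothetical):

```cpp
// Why a recursive_mutex: a callback invoked while the lock is held re-enters
// an API that takes the same lock. std::recursive_mutex permits re-entry by
// the owning thread; std::mutex would deadlock here.
#include <functional>
#include <iostream>
#include <mutex>

class Engine {
public:
    void SetCallback(std::function<void(Engine&)> cb) {
        std::scoped_lock lock{mutex_callback};
        callback = std::move(cb);
    }
    void TriggerEvent() {
        std::scoped_lock lock{mutex_callback}; // first acquisition
        if (callback) {
            callback(*this); // callback may re-enter Query() below
        }
    }
    int Query() {
        std::scoped_lock lock{mutex_callback}; // re-entrant second acquisition
        return 42;
    }

private:
    mutable std::recursive_mutex mutex_callback;
    std::function<void(Engine&)> callback;
};

int main() {
    Engine engine;
    engine.SetCallback([](Engine& e) { std::cout << e.Query() << '\n'; });
    engine.TriggerEvent(); // prints 42 instead of deadlocking
}
```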
```diff
@@ -1469,19 +1469,29 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
             overlap_ids.push_back(overlap_id);
             overlap.Pick();
             const VAddr overlap_cpu_addr = overlap.CpuAddr();
+            bool goes_left = false;
             if (overlap_cpu_addr < begin) {
+                goes_left = true;
                 cpu_addr = begin = overlap_cpu_addr;
             }
-            end = std::max(end, overlap_cpu_addr + overlap.SizeBytes());
-
+            const VAddr overlap_end = overlap_cpu_addr + overlap.SizeBytes();
+            bool goes_right = false;
+            if (overlap_end > end) {
+                goes_right = true;
+                end = overlap_end;
+            }
             stream_score += overlap.StreamScore();
             if (stream_score > STREAM_LEAP_THRESHOLD && !has_stream_leap) {
                 // When this memory region has been joined a bunch of times, we assume it's being used
                 // as a stream buffer. Increase the size to skip constantly recreating buffers.
                 has_stream_leap = true;
-                begin -= PAGE_SIZE * 256;
-                cpu_addr = begin;
-                end += PAGE_SIZE * 256;
+                if (goes_right) {
+                    begin -= PAGE_SIZE * 256;
+                    cpu_addr = begin;
+                }
+                if (goes_left) {
+                    end += PAGE_SIZE * 256;
+                }
             }
         }
         return OverlapResult{
```
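The fix records which direction each overlap actually extended the joined range (`goes_left`/`goes_right`) and applies the 256-page stream-leap pad conditionally, mirroring the diff's choice of padding `begin` on rightward growth and `end` on leftward growth rather than both ends unconditionally. A small arithmetic sketch with made-up addresses:

```cpp
// Sketch of the directional stream-leap expansion. PAGE_SIZE and the
// 256-page pad mirror the diff; the address values are invented.
#include <cstdint>
#include <iostream>

constexpr std::uint64_t PAGE_SIZE = 0x1000;

int main() {
    std::uint64_t begin = 0x500000, end = 0x600000; // current joined range
    const std::uint64_t overlap_begin = 0x580000, overlap_size = 0x100000;

    bool goes_left = false;
    if (overlap_begin < begin) {
        goes_left = true;
        begin = overlap_begin;
    }
    const std::uint64_t overlap_end = overlap_begin + overlap_size;
    bool goes_right = false;
    if (overlap_end > end) {
        goes_right = true;
        end = overlap_end;
    }

    // Pad only one side, keyed to the observed growth direction, as in the diff.
    if (goes_right) {
        begin -= PAGE_SIZE * 256;
    }
    if (goes_left) {
        end += PAGE_SIZE * 256;
    }
    std::cout << std::hex << "range 0x" << begin << "-0x" << end << '\n';
}
```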
```diff
@@ -4,6 +4,8 @@

 #include <array>
 #include <vector>
+#include "common/scope_exit.h"
+#include "video_core/dirty_flags.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/macro/macro.h"
 #include "video_core/macro/macro_hle.h"
@@ -59,6 +61,7 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
     maxwell3d.regs.index_array.first = parameters[3];
     maxwell3d.regs.reg_array[0x446] = element_base; // vertex id base?
     maxwell3d.regs.index_array.count = parameters[1];
+    maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
     maxwell3d.regs.vb_element_base = element_base;
     maxwell3d.regs.vb_base_instance = base_instance;
     maxwell3d.mme_draw.instance_count = instance_count;
@@ -81,10 +84,67 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
     maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
 }

-constexpr std::array<std::pair<u64, HLEFunction>, 3> hle_funcs{{
+// Multidraw Indirect
+void HLE_3f5e74b9c9a50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
+    SCOPE_EXIT({
+        // Clean everything.
+        maxwell3d.regs.reg_array[0x446] = 0x0; // vertex id base?
+        maxwell3d.regs.index_array.count = 0;
+        maxwell3d.regs.vb_element_base = 0x0;
+        maxwell3d.regs.vb_base_instance = 0x0;
+        maxwell3d.mme_draw.instance_count = 0;
+        maxwell3d.CallMethodFromMME(0x8e3, 0x640);
+        maxwell3d.CallMethodFromMME(0x8e4, 0x0);
+        maxwell3d.CallMethodFromMME(0x8e5, 0x0);
+        maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
+        maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+    });
+    const u32 start_indirect = parameters[0];
+    const u32 end_indirect = parameters[1];
+    if (start_indirect >= end_indirect) {
+        // Nothing to do.
+        return;
+    }
+    const auto topology =
+        static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[2]);
+    maxwell3d.regs.draw.topology.Assign(topology);
+    const u32 padding = parameters[3];
+    const std::size_t max_draws = parameters[4];
+
+    const u32 indirect_words = 5 + padding;
+    const std::size_t first_draw = start_indirect;
+    const std::size_t effective_draws = end_indirect - start_indirect;
+    const std::size_t last_draw = start_indirect + std::min(effective_draws, max_draws);
+
+    for (std::size_t index = first_draw; index < last_draw; index++) {
+        const std::size_t base = index * indirect_words + 5;
+        const u32 num_vertices = parameters[base];
+        const u32 instance_count = parameters[base + 1];
+        const u32 first_index = parameters[base + 2];
+        const u32 base_vertex = parameters[base + 3];
+        const u32 base_instance = parameters[base + 4];
+        maxwell3d.regs.index_array.first = first_index;
+        maxwell3d.regs.reg_array[0x446] = base_vertex;
+        maxwell3d.regs.index_array.count = num_vertices;
+        maxwell3d.regs.vb_element_base = base_vertex;
+        maxwell3d.regs.vb_base_instance = base_instance;
+        maxwell3d.mme_draw.instance_count = instance_count;
+        maxwell3d.CallMethodFromMME(0x8e3, 0x640);
+        maxwell3d.CallMethodFromMME(0x8e4, base_vertex);
+        maxwell3d.CallMethodFromMME(0x8e5, base_instance);
+        maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+        if (maxwell3d.ShouldExecute()) {
+            maxwell3d.Rasterizer().Draw(true, true);
+        }
+        maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
+    }
+}
+
+constexpr std::array<std::pair<u64, HLEFunction>, 4> hle_funcs{{
     {0x771BB18C62444DA0, &HLE_771BB18C62444DA0},
     {0x0D61FC9FAAC9FCAD, &HLE_0D61FC9FAAC9FCAD},
     {0x0217920100488FF7, &HLE_0217920100488FF7},
+    {0x3f5e74b9c9a50164, &HLE_3f5e74b9c9a50164},
 }};
@@ -100,6 +160,7 @@ private:
     Engines::Maxwell3D& maxwell3d;
     HLEFunction func;
 };

 } // Anonymous namespace

 HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {}
```
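The new HLE function walks an indirect-draw parameter buffer: a five-word header (start, end, topology, padding words per record, max draws), then one record of five `u32`s plus padding per draw (vertex count, instance count, first index, base vertex, base instance). A standalone sketch of parsing that layout (the record values are invented):

```cpp
// Standalone sketch of the indirect-draw parameter layout the HLE macro walks.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

using u32 = std::uint32_t;

struct DrawRecord {
    u32 num_vertices, instance_count, first_index, base_vertex, base_instance;
};

int main() {
    // Header: start, end, topology, padding words per record, max draws.
    std::vector<u32> params{0, 2, /*topology=*/4, /*padding=*/1, /*max=*/16};
    // Two records, each 5 payload words followed by 1 padding word.
    params.insert(params.end(), {6, 1, 0, 0, 0, /*pad*/ 0});
    params.insert(params.end(), {3, 2, 6, 4, 1, /*pad*/ 0});

    const u32 start = params[0], end = params[1], padding = params[3];
    const u32 max_draws = params[4];
    const u32 indirect_words = 5 + padding;           // stride of one record
    const u32 last = start + std::min(end - start, max_draws);

    for (u32 index = start; index < last; ++index) {
        const std::size_t base = index * indirect_words + 5; // skip the header
        const DrawRecord draw{params[base], params[base + 1], params[base + 2],
                              params[base + 3], params[base + 4]};
        std::printf("draw %u: %u vertices, %u instances\n", index,
                    draw.num_vertices, draw.instance_count);
    }
}
```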