Compare commits

...

3 Commits

Author   SHA1        Message                  Date
yuzubot  ef31574602  "Merge Tagged PR 11689"  2023-10-19 12:03:10 +00:00
yuzubot  41efd2ab55  "Merge Tagged PR 11748"  2023-10-19 12:03:09 +00:00
yuzubot  bbc8b888f6  "Merge Tagged PR 11806"  2023-10-19 12:03:08 +00:00
41 changed files with 676 additions and 513 deletions

View File

@@ -19,6 +19,7 @@ cmake .. \
-DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON \
-DENABLE_QT_TRANSLATION=ON \
-DUSE_DISCORD_PRESENCE=ON \
-DYUZU_CRASH_DUMPS=ON \
-DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} \
-DYUZU_USE_BUNDLED_FFMPEG=ON \
-GNinja

View File

@@ -23,6 +23,7 @@ cmake .. \
-DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} \
-DYUZU_USE_BUNDLED_FFMPEG=ON \
-DYUZU_ENABLE_LTO=ON \
-DYUZU_CRASH_DUMPS=ON \
-GNinja
ninja

View File

@@ -17,7 +17,6 @@ cmake .. \
-DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON \
-DENABLE_QT_TRANSLATION=ON \
-DUSE_CCACHE=ON \
-DYUZU_CRASH_DUMPS=ON \
-DYUZU_USE_BUNDLED_SDL2=OFF \
-DYUZU_USE_EXTERNAL_SDL2=OFF \
-DYUZU_TESTS=OFF \

View File

@@ -3,4 +3,4 @@
[codespell]
skip = ./.git,./build,./dist,./Doxyfile,./externals,./LICENSES,./src/android/app/src/main/res
ignore-words-list = aci,allright,ba,canonicalizations,deques,froms,hda,inout,lod,masia,nam,nax,nd,optin,pullrequests,pullrequest,te,transfered,unstall,uscaled,zink
ignore-words-list = aci,allright,ba,canonicalizations,deques,froms,hda,inout,lod,masia,nam,nax,nd,optin,pullrequests,pullrequest,te,transfered,unstall,uscaled,vas,zink

.gitmodules vendored
View File

@@ -58,3 +58,6 @@
[submodule "VulkanMemoryAllocator"]
path = externals/VulkanMemoryAllocator
url = https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git
[submodule "breakpad"]
path = externals/breakpad
url = https://github.com/yuzu-emu/breakpad.git

View File

@@ -52,7 +52,7 @@ option(YUZU_DOWNLOAD_ANDROID_VVL "Download validation layer binary for android"
CMAKE_DEPENDENT_OPTION(YUZU_ROOM "Compile LDN room server" ON "NOT ANDROID" OFF)
CMAKE_DEPENDENT_OPTION(YUZU_CRASH_DUMPS "Compile Windows crash dump (Minidump) support" OFF "WIN32" OFF)
CMAKE_DEPENDENT_OPTION(YUZU_CRASH_DUMPS "Compile crash dump (Minidump) support" OFF "WIN32 OR LINUX" OFF)
option(YUZU_USE_BUNDLED_VCPKG "Use vcpkg for yuzu dependencies" "${MSVC}")
@@ -139,9 +139,6 @@ if (YUZU_USE_BUNDLED_VCPKG)
if (YUZU_TESTS)
list(APPEND VCPKG_MANIFEST_FEATURES "yuzu-tests")
endif()
if (YUZU_CRASH_DUMPS)
list(APPEND VCPKG_MANIFEST_FEATURES "dbghelp")
endif()
if (ENABLE_WEB_SERVICE)
list(APPEND VCPKG_MANIFEST_FEATURES "web-service")
endif()
@@ -550,6 +547,18 @@ if (NOT YUZU_USE_BUNDLED_FFMPEG)
find_package(FFmpeg 4.3 REQUIRED QUIET COMPONENTS ${FFmpeg_COMPONENTS})
endif()
if (WIN32 AND YUZU_CRASH_DUMPS)
set(BREAKPAD_VER "breakpad-c89f9dd")
download_bundled_external("breakpad/" ${BREAKPAD_VER} BREAKPAD_PREFIX)
set(BREAKPAD_CLIENT_INCLUDE_DIR "${BREAKPAD_PREFIX}/include")
set(BREAKPAD_CLIENT_LIBRARY "${BREAKPAD_PREFIX}/lib/libbreakpad_client.lib")
add_library(libbreakpad_client INTERFACE IMPORTED)
target_link_libraries(libbreakpad_client INTERFACE "${BREAKPAD_CLIENT_LIBRARY}")
target_include_directories(libbreakpad_client INTERFACE "${BREAKPAD_CLIENT_INCLUDE_DIR}")
endif()
# Prefer the -pthread flag on Linux.
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
@@ -569,13 +578,6 @@ elseif (WIN32)
# PSAPI is the Process Status API
set(PLATFORM_LIBRARIES ${PLATFORM_LIBRARIES} psapi imm32 version)
endif()
if (YUZU_CRASH_DUMPS)
find_library(DBGHELP_LIBRARY dbghelp)
if ("${DBGHELP_LIBRARY}" STREQUAL "DBGHELP_LIBRARY-NOTFOUND")
message(FATAL_ERROR "YUZU_CRASH_DUMPS enabled but dbghelp library not found")
endif()
endif()
elseif (CMAKE_SYSTEM_NAME MATCHES "^(Linux|kFreeBSD|GNU|SunOS)$")
set(PLATFORM_LIBRARIES rt)
endif()

View File

@@ -185,3 +185,105 @@ if (ANDROID)
add_subdirectory(libadrenotools)
endif()
endif()
# Breakpad
# https://github.com/microsoft/vcpkg/blob/master/ports/breakpad/CMakeLists.txt
if (YUZU_CRASH_DUMPS AND NOT TARGET libbreakpad_client)
set(BREAKPAD_WIN32_DEFINES
NOMINMAX
UNICODE
WIN32_LEAN_AND_MEAN
_CRT_SECURE_NO_WARNINGS
_CRT_SECURE_NO_DEPRECATE
_CRT_NONSTDC_NO_DEPRECATE
)
# libbreakpad
add_library(libbreakpad STATIC)
file(GLOB_RECURSE LIBBREAKPAD_SOURCES breakpad/src/processor/*.cc)
file(GLOB_RECURSE LIBDISASM_SOURCES breakpad/src/third_party/libdisasm/*.c)
list(FILTER LIBBREAKPAD_SOURCES EXCLUDE REGEX "_unittest|_selftest|synth_minidump|/tests|/testdata|/solaris|microdump_stackwalk|minidump_dump|minidump_stackwalk")
if (WIN32)
list(FILTER LIBBREAKPAD_SOURCES EXCLUDE REGEX "/linux|/mac|/android")
target_compile_definitions(libbreakpad PRIVATE ${BREAKPAD_WIN32_DEFINES})
target_include_directories(libbreakpad PRIVATE "${CMAKE_GENERATOR_INSTANCE}/DIA SDK/include")
elseif (APPLE)
list(FILTER LIBBREAKPAD_SOURCES EXCLUDE REGEX "/linux|/windows|/android")
else()
list(FILTER LIBBREAKPAD_SOURCES EXCLUDE REGEX "/mac|/windows|/android")
endif()
target_sources(libbreakpad PRIVATE ${LIBBREAKPAD_SOURCES} ${LIBDISASM_SOURCES})
target_include_directories(libbreakpad
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/breakpad/src
${CMAKE_CURRENT_SOURCE_DIR}/breakpad/src/third_party/libdisasm
)
# libbreakpad_client
add_library(libbreakpad_client STATIC)
file(GLOB LIBBREAKPAD_COMMON_SOURCES breakpad/src/common/*.cc breakpad/src/common/*.c breakpad/src/client/*.cc)
if (WIN32)
file(GLOB_RECURSE LIBBREAKPAD_CLIENT_SOURCES breakpad/src/client/windows/*.cc breakpad/src/common/windows/*.cc)
list(FILTER LIBBREAKPAD_COMMON_SOURCES EXCLUDE REGEX "language.cc|path_helper.cc|stabs_to_module.cc|stabs_reader.cc|minidump_file_writer.cc")
target_include_directories(libbreakpad_client PRIVATE "${CMAKE_GENERATOR_INSTANCE}/DIA SDK/include")
target_compile_definitions(libbreakpad_client PRIVATE ${BREAKPAD_WIN32_DEFINES})
elseif (APPLE)
target_compile_definitions(libbreakpad_client PRIVATE HAVE_MACH_O_NLIST_H)
file(GLOB_RECURSE LIBBREAKPAD_CLIENT_SOURCES breakpad/src/client/mac/*.cc breakpad/src/common/mac/*.cc)
list(APPEND LIBBREAKPAD_CLIENT_SOURCES breakpad/src/common/mac/MachIPC.mm)
else()
target_compile_definitions(libbreakpad_client PUBLIC -DHAVE_A_OUT_H)
file(GLOB_RECURSE LIBBREAKPAD_CLIENT_SOURCES breakpad/src/client/linux/*.cc breakpad/src/common/linux/*.cc)
endif()
list(APPEND LIBBREAKPAD_CLIENT_SOURCES ${LIBBREAKPAD_COMMON_SOURCES})
list(FILTER LIBBREAKPAD_CLIENT_SOURCES EXCLUDE REGEX "/sender|/tests|/unittests|/testcases|_unittest|_test")
target_sources(libbreakpad_client PRIVATE ${LIBBREAKPAD_CLIENT_SOURCES})
target_include_directories(libbreakpad_client PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/breakpad/src)
if (WIN32)
target_link_libraries(libbreakpad_client PRIVATE wininet.lib)
elseif (APPLE)
find_library(CoreFoundation_FRAMEWORK CoreFoundation)
target_link_libraries(libbreakpad_client PRIVATE ${CoreFoundation_FRAMEWORK})
else()
find_library(PTHREAD_LIBRARIES pthread)
target_compile_definitions(libbreakpad_client PRIVATE HAVE_GETCONTEXT=1)
if (PTHREAD_LIBRARIES)
target_link_libraries(libbreakpad_client PRIVATE ${PTHREAD_LIBRARIES})
endif()
endif()
# Host tools for symbol processing
if (LINUX)
find_package(ZLIB REQUIRED)
add_executable(minidump_stackwalk breakpad/src/processor/minidump_stackwalk.cc)
target_link_libraries(minidump_stackwalk PRIVATE libbreakpad libbreakpad_client)
add_executable(dump_syms
breakpad/src/common/dwarf_cfi_to_module.cc
breakpad/src/common/dwarf_cu_to_module.cc
breakpad/src/common/dwarf_line_to_module.cc
breakpad/src/common/dwarf_range_list_handler.cc
breakpad/src/common/language.cc
breakpad/src/common/module.cc
breakpad/src/common/path_helper.cc
breakpad/src/common/stabs_reader.cc
breakpad/src/common/stabs_to_module.cc
breakpad/src/common/dwarf/bytereader.cc
breakpad/src/common/dwarf/dwarf2diehandler.cc
breakpad/src/common/dwarf/dwarf2reader.cc
breakpad/src/common/dwarf/elf_reader.cc
breakpad/src/common/linux/crc32.cc
breakpad/src/common/linux/dump_symbols.cc
breakpad/src/common/linux/elf_symbols_to_module.cc
breakpad/src/common/linux/elfutils.cc
breakpad/src/common/linux/file_id.cc
breakpad/src/common/linux/linux_libc_support.cc
breakpad/src/common/linux/memory_mapped_file.cc
breakpad/src/common/linux/safe_readlink.cc
breakpad/src/tools/linux/dump_syms/dump_syms.cc)
target_link_libraries(dump_syms PRIVATE libbreakpad_client ZLIB::ZLIB)
endif()
endif()

externals/breakpad vendored Submodule

Submodule externals/breakpad added at c89f9dddc7

View File

@@ -13,6 +13,7 @@
#define AMIIBO_DIR "amiibo"
#define CACHE_DIR "cache"
#define CONFIG_DIR "config"
#define CRASH_DUMPS_DIR "crash_dumps"
#define DUMP_DIR "dump"
#define KEYS_DIR "keys"
#define LOAD_DIR "load"

View File

@@ -119,6 +119,7 @@ public:
GenerateYuzuPath(YuzuPath::AmiiboDir, yuzu_path / AMIIBO_DIR);
GenerateYuzuPath(YuzuPath::CacheDir, yuzu_path_cache);
GenerateYuzuPath(YuzuPath::ConfigDir, yuzu_path_config);
GenerateYuzuPath(YuzuPath::CrashDumpsDir, yuzu_path / CRASH_DUMPS_DIR);
GenerateYuzuPath(YuzuPath::DumpDir, yuzu_path / DUMP_DIR);
GenerateYuzuPath(YuzuPath::KeysDir, yuzu_path / KEYS_DIR);
GenerateYuzuPath(YuzuPath::LoadDir, yuzu_path / LOAD_DIR);

View File

@@ -15,6 +15,7 @@ enum class YuzuPath {
AmiiboDir, // Where Amiibo backups are stored.
CacheDir, // Where cached filesystem data is stored.
ConfigDir, // Where config files are stored.
CrashDumpsDir, // Where crash dumps are stored.
DumpDir, // Where dumped data is stored.
KeysDir, // Where key files are stored.
LoadDir, // Where cheat/mod files are stored.
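
Taken together with the Breakpad targets above, these hunks give minidumps a home: a crash_dumps directory is defined, created at startup, and exposed through the YuzuPath enum. As a rough illustration of how the pieces could be glued together on Linux (none of this wiring appears in the diff; the function name and call sites are assumptions), the Breakpad client built in externals might be pointed at the new directory like so:

#include <string>

#include "client/linux/handler/exception_handler.h" // from externals/breakpad

// Hypothetical glue, not taken from this diff: write a minidump into the
// CrashDumpsDir created above whenever the process crashes.
static bool OnMinidumpWritten(const google_breakpad::MinidumpDescriptor& descriptor,
                              void* /*context*/, bool succeeded) {
    // descriptor.path() names the .dmp file that was just written.
    return succeeded;
}

void InstallCrashHandler(const std::string& crash_dumps_dir) {
    static google_breakpad::MinidumpDescriptor descriptor(crash_dumps_dir);
    static google_breakpad::ExceptionHandler handler(descriptor,
                                                     /*filter=*/nullptr,
                                                     OnMinidumpWritten,
                                                     /*callback_context=*/nullptr,
                                                     /*install_handler=*/true,
                                                     /*server_fd=*/-1);
}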

View File

@@ -505,7 +505,6 @@ struct Values {
linkage, false, "use_auto_stub", Category::Debugging, Specialization::Default, false};
Setting<bool> enable_all_controllers{linkage, false, "enable_all_controllers",
Category::Debugging};
Setting<bool> create_crash_dumps{linkage, false, "create_crash_dumps", Category::Debugging};
Setting<bool> perform_vulkan_check{linkage, true, "perform_vulkan_check", Category::Debugging};
// Miscellaneous

View File

@@ -106,7 +106,7 @@ static_assert(KernelPageBufferAdditionalSize ==
/// memory.
static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
KVirtualAddress slab_addr) {
slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
slab_addr -= memory_layout.GetSlabRegion().GetAddress();
return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
}
@@ -196,7 +196,12 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
// Get the start of the slab region, since that's where we'll be working.
KVirtualAddress address = memory_layout.GetSlabRegionAddress();
const KMemoryRegion& slab_region = memory_layout.GetSlabRegion();
KVirtualAddress address = slab_region.GetAddress();
// Clear the slab region.
// TODO: implement access to kernel VAs.
// std::memset(device_ptr, 0, slab_region.GetSize());
// Initialize slab type array to be in sorted order.
std::array<KSlabType, KSlabType_Count> slab_types;

View File

@@ -19,4 +19,8 @@ static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
MainMemoryAddress);
}
static inline size_t GetInitialProcessBinarySize() {
return InitialProcessBinarySizeMax;
}
} // namespace Kernel

View File

@@ -36,6 +36,7 @@ enum class KMemoryState : u32 {
FlagCanChangeAttribute = (1 << 24),
FlagCanCodeMemory = (1 << 25),
FlagLinearMapped = (1 << 26),
FlagCanPermissionLock = (1 << 27),
FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
@@ -50,12 +51,16 @@ enum class KMemoryState : u32 {
FlagLinearMapped,
Free = static_cast<u32>(Svc::MemoryState::Free),
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
IoMemory = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
IoRegister =
static_cast<u32>(Svc::MemoryState::Io) | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
FlagCanCodeMemory,
FlagCanCodeMemory | FlagCanPermissionLock,
Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,
@@ -65,7 +70,8 @@ enum class KMemoryState : u32 {
AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
FlagCanCodeAlias,
AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,
FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory |
FlagCanPermissionLock,
Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -73,7 +79,7 @@ enum class KMemoryState : u32 {
Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagLinearMapped,
Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
@@ -94,7 +100,7 @@ enum class KMemoryState : u32 {
NonDeviceIpc =
static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,
Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
Kernel = static_cast<u32>(Svc::MemoryState::Kernel),
GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
@@ -105,34 +111,36 @@ enum class KMemoryState : u32 {
Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
FlagCanAlignedDeviceMap | FlagCanQueryPhysical | FlagCanUseNonSecureIpc |
FlagCanUseNonDeviceIpc,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::IoMemory) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::IoRegister) == 0x00180001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x0FFEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x0FFFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400000C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00000013);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x055C3817);
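
// Not part of the upstream header: a quick decomposition of why the asserted
// values changed, using flag values the unchanged asserts already imply
// (FlagMapped = 1 << 13, e.g. IoMemory vs IoRegister differ by 0x2000;
// FlagCanQueryPhysical = 1 << 18; FlagCanPermissionLock = 1 << 27 as above).
static_assert((0x07FEBD04 | (1U << 27)) == 0x0FFEBD04);  // CodeData gains FlagCanPermissionLock
static_assert((0x0400200C & ~(1U << 13)) == 0x0400000C); // ThreadLocal loses FlagMapped
static_assert((0x00002013 & ~(1U << 13)) == 0x00000013); // Kernel loses FlagMapped
static_assert((0x05583817 | (1U << 18)) == 0x055C3817);  // Insecure gains FlagCanQueryPhysical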
enum class KMemoryPermission : u8 {
None = 0,
@@ -182,8 +190,9 @@ enum class KMemoryAttribute : u8 {
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
PermissionLocked = static_cast<u8>(Svc::MemoryAttribute::PermissionLocked),
SetMask = Uncached,
SetMask = Uncached | PermissionLocked,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
@@ -261,6 +270,10 @@ struct KMemoryInfo {
return m_state;
}
constexpr Svc::MemoryState GetSvcState() const {
return static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask);
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
@@ -326,6 +339,10 @@ public:
return this->GetEndAddress() - 1;
}
constexpr KMemoryState GetState() const {
return m_memory_state;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
@@ -443,6 +460,13 @@ public:
}
}
constexpr void UpdateAttribute(KMemoryAttribute mask, KMemoryAttribute attr) {
ASSERT(False(mask & KMemoryAttribute::IpcLocked));
ASSERT(False(mask & KMemoryAttribute::DeviceShared));
m_attribute = (m_attribute & ~mask) | attr;
}
constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));

View File

@@ -160,8 +160,8 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
}
// Update block state.
it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
static_cast<u8>(clear_disable_attr));
it->Update(state, perm, attr, it->GetAddress() == address,
static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
}
@@ -175,7 +175,9 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
KProcessAddress address, size_t num_pages,
KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attr) {
KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
@@ -214,7 +216,8 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
}
// Update block state.
it->Update(state, perm, attr, false, 0, 0);
it->Update(state, perm, attr, false, static_cast<u8>(set_disable_attr),
static_cast<u8>(clear_disable_attr));
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
@@ -284,6 +287,65 @@ void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocat
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator,
KProcessAddress address, size_t num_pages,
KMemoryAttribute mask, KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if ((it->GetAttribute() & mask) != attr) {
// If we need to, create a new block before and insert it.
if (cur_info.GetAddress() != GetInteger(cur_address)) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
// If we need to, create a new block after and insert it.
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
// Update block state.
it->UpdateAttribute(mask, attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
// If we already have the right attributes, just advance.
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages =
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
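
// Summary (not upstream commentary): for each block whose attribute bits under
// `mask` differ from `attr`, UpdateAttribute splits off unaligned head/tail
// blocks, rewrites the bits under the mask via KMemoryBlock::UpdateAttribute,
// and finally re-coalesces neighbours with CoalesceForUpdate().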
// Debug.
bool KMemoryBlockManager::CheckState() const {
// Loop over every block, ensuring that we are sorted and coalesced.

View File

@@ -115,7 +115,11 @@ public:
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr);
KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr);
void UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryAttribute mask, KMemoryAttribute attr);
iterator FindIterator(KProcessAddress address) const {
return m_memory_block_tree.find(KMemoryBlock(

View File

@@ -137,11 +137,9 @@ public:
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
}
KVirtualAddress GetSlabRegionAddress() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
.GetAddress();
const KMemoryRegion& GetSlabRegion() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab));
}
const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
}

View File

@@ -119,7 +119,8 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
// Free each region to its corresponding heap.
size_t reserved_sizes[MaxManagerCount] = {};
const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
const size_t ini_size = GetInitialProcessBinarySize();
const KPhysicalAddress ini_end = ini_start + ini_size;
const KPhysicalAddress ini_last = ini_end - 1;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@@ -137,13 +138,13 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
}
// Open/reserve the ini memory.
manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
manager.OpenFirst(ini_start, ini_size / PageSize);
reserved_sizes[it.GetAttributes()] += ini_size;
// Free memory after the ini to the heap.
if (ini_last != cur_last) {
ASSERT(cur_end != 0);
manager.Free(ini_end, cur_end - ini_end);
manager.Free(ini_end, (cur_end - ini_end) / PageSize);
}
} else {
// Ensure there's no partial overlap with the ini image.

View File

@@ -190,9 +190,15 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
KMemoryRegionAttr_LinearMapped);
constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown =
KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(
KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
(0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));
static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() ==
(0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));
constexpr inline auto KMemoryRegionType_DramReservedEarly =
KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
@@ -217,16 +223,18 @@ constexpr inline auto KMemoryRegionType_DramPoolPartition =
static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
(0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline auto KMemoryRegionType_DramPoolManagement =
KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
// UNUSED: .Derive(4, 1);
// UNUSED: .Derive(4, 2);
constexpr inline const auto KMemoryRegionType_DramPoolManagement =
KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(
KMemoryRegionAttr_CarveoutProtected);
constexpr inline auto KMemoryRegionType_DramUserPool =
KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
constexpr inline const auto KMemoryRegionType_DramUserPool =
KMemoryRegionType_DramPoolPartition.Derive(4, 3);
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
(0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
(0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
(0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
(0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline auto KMemoryRegionType_DramApplicationPool =
KMemoryRegionType_DramUserPool.Derive(4, 0);
@@ -237,60 +245,63 @@ constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
constexpr inline auto KMemoryRegionType_DramSystemPool =
KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
(0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
(0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
(0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
(0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
(0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
(0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
(0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
(0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_CarveoutProtected));
constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
// UNUSED: .DeriveSparse(2, 2, 0);
constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
// UNUSED: .Derive(4, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown =
KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x32));
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown.GetValue() == (0x92));
constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
// UNUSED: .Derive(4, 3);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt =
KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement =
KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramUserPool =
KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x31A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x61A);
// NOTE: For unknown reason, the pools are derived out-of-order here.
// It's worth eventually trying to understand why Nintendo made this choice.
// UNUSED: .Derive(6, 0);
// UNUSED: .Derive(6, 1);
constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool =
KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool =
KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool =
KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x361A);
static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x561A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x961A);
constexpr inline auto KMemoryRegionType_ArchDeviceBase =
KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
@@ -354,12 +365,14 @@ constexpr inline auto KMemoryRegionType_KernelTemp =
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
} else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
} else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureUnknown;
} else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
return KMemoryRegionType_VirtualDramUnknownDebug;
} else {

View File

@@ -183,12 +183,17 @@ private:
class KScopedPageGroup {
public:
explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
explicit KScopedPageGroup(const KPageGroup* gp, bool not_first = true) : m_pg(gp) {
if (m_pg) {
m_pg->Open();
if (not_first) {
m_pg->Open();
} else {
m_pg->OpenFirst();
}
}
}
explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
explicit KScopedPageGroup(const KPageGroup& gp, bool not_first = true)
: KScopedPageGroup(std::addressof(gp), not_first) {}
~KScopedPageGroup() {
if (m_pg) {
m_pg->Close();
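
The added not_first flag lets the scope take the very first reference to a page group instead of an additional one. A sketch of the intended call pattern, mirroring the Operate() change later in this diff:

// When a group is being mapped for the first time, take the initial
// reference with OpenFirst(); ~KScopedPageGroup still balances it with
// Close() if the mapping fails part-way.
KScopedPageGroup spg(page_group, /*not_first=*/operation != OperationType::MapFirstGroup);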

View File

@@ -505,7 +505,7 @@ Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress
R_TRY(this->CheckMemoryStateContiguous(
std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::All, KMemoryAttribute::None));
KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
// Determine whether any pages being unmapped are code.
bool any_code_pages = false;
@@ -1724,29 +1724,43 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
PageSize;
// While we have pages to map, map them.
while (map_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != pg.end());
{
// Create a page group for the current mapping range.
KPageGroup cur_pg(m_kernel, m_block_info_manager);
{
ON_RESULT_FAILURE_2 {
cur_pg.OpenFirst();
cur_pg.Close();
};
// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
size_t remain_pages = map_pages;
while (remain_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != pg.end());
// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}
// Add whatever we can to the current block.
const size_t cur_pages = std::min(pg_pages, remain_pages);
R_TRY(cur_pg.AddBlock(pg_phys_addr +
((pg_pages - cur_pages) * PageSize),
cur_pages));
// Advance.
remain_pages -= cur_pages;
pg_pages -= cur_pages;
}
}
// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
OperationType::MapFirst, pg_phys_addr));
// Advance.
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
// Map the pages.
R_TRY(this->Operate(cur_address, map_pages, cur_pg,
OperationType::MapFirstGroup));
}
}
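
// Net effect of the rework above (not upstream commentary): instead of taking
// a first reference block-by-block with OperationType::MapFirst, the loop now
// gathers each mapping range into a KPageGroup and maps it with a single
// Operate(..., OperationType::MapFirstGroup) call, with ON_RESULT_FAILURE_2
// releasing the group's references if anything fails.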
@@ -1770,7 +1784,11 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
m_memory_block_manager.UpdateIfMatch(
std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
address == this->GetAliasRegionStart()
? KMemoryBlockDisableMergeAttribute::Normal
: KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::None);
R_SUCCEED();
}
@@ -1868,6 +1886,13 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
// Iterate over the memory, unmapping as we go.
auto it = m_memory_block_manager.FindIterator(cur_address);
const auto clear_merge_attr =
(it->GetState() == KMemoryState::Normal &&
it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
? KMemoryBlockDisableMergeAttribute::Normal
: KMemoryBlockDisableMergeAttribute::None;
while (true) {
// Check that the iterator is valid.
ASSERT(it != m_memory_block_manager.end());
@@ -1905,7 +1930,7 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::None);
clear_merge_attr);
// We succeeded.
R_SUCCEED();
@@ -2379,8 +2404,7 @@ Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
KScopedPageTableUpdater updater(this);
// Perform mapping operation.
const KPageProperties properties = {perm, state == KMemoryState::Io, false,
DisableMergeAttribute::DisableHead};
const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
// Update the blocks.
@@ -2422,8 +2446,7 @@ Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMem
KScopedPageTableUpdater updater(this);
// Perform mapping operation.
const KPageProperties properties = {perm, state == KMemoryState::Io, false,
DisableMergeAttribute::DisableHead};
const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
// Update the blocks.
@@ -2652,11 +2675,18 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
size_t num_allocator_blocks;
constexpr auto AttributeTestMask =
~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
R_TRY(this->CheckMemoryState(
std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
const KMemoryState state_test_mask =
static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
: 0) |
((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
: 0));
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
std::addressof(old_attr), std::addressof(num_allocator_blocks),
addr, size, state_test_mask, state_test_mask,
KMemoryPermission::None, KMemoryPermission::None,
AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
// Create an update allocator.
Result allocator_result{ResultSuccess};
@@ -2664,18 +2694,17 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
// Determine the new attribute.
const KMemoryAttribute new_attr =
static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
static_cast<KMemoryAttribute>(attr & mask)));
// Perform operation.
this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
// If we need to, perform a change attribute operation.
if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
// Perform operation.
R_TRY(this->Operate(addr, num_pages, old_perm,
OperationType::ChangePermissionsAndRefreshAndFlush, 0));
}
// Update the blocks.
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
new_attr, KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::None);
m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
static_cast<KMemoryAttribute>(mask),
static_cast<KMemoryAttribute>(attr));
R_SUCCEED();
}
@@ -2863,7 +2892,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress
&KMemoryBlock::ShareToDevice, KMemoryPermission::None);
// Set whether the locked memory was io.
*out_is_io = old_state == KMemoryState::Io;
*out_is_io =
static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
R_SUCCEED();
}
@@ -3021,9 +3051,10 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGr
ASSERT(num_pages == page_group.GetNumPages());
switch (operation) {
case OperationType::MapGroup: {
case OperationType::MapGroup:
case OperationType::MapFirstGroup: {
// We want to maintain a new reference to every page in the group.
KScopedPageGroup spg(page_group);
KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
for (const auto& node : page_group) {
const size_t size{node.GetNumPages() * PageSize};
@@ -3065,7 +3096,6 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
break;
}
case OperationType::MapFirst:
case OperationType::Map: {
ASSERT(map_addr);
ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
@@ -3073,11 +3103,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
// Open references to pages, if we should.
if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
if (operation == OperationType::MapFirst) {
m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
} else {
m_kernel.MemoryManager().Open(map_addr, num_pages);
}
m_kernel.MemoryManager().Open(map_addr, num_pages);
}
break;
}
@@ -3087,6 +3113,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
}
case OperationType::ChangePermissions:
case OperationType::ChangePermissionsAndRefresh:
case OperationType::ChangePermissionsAndRefreshAndFlush:
break;
default:
ASSERT(false);
@@ -3106,79 +3133,79 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
}
}
KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
case Svc::MemoryState::Free:
case Svc::MemoryState::Kernel:
return m_address_space_start;
case KMemoryState::Normal:
case Svc::MemoryState::Normal:
return m_heap_region_start;
case KMemoryState::Ipc:
case KMemoryState::NonSecureIpc:
case KMemoryState::NonDeviceIpc:
case Svc::MemoryState::Ipc:
case Svc::MemoryState::NonSecureIpc:
case Svc::MemoryState::NonDeviceIpc:
return m_alias_region_start;
case KMemoryState::Stack:
case Svc::MemoryState::Stack:
return m_stack_region_start;
case KMemoryState::Static:
case KMemoryState::ThreadLocal:
case Svc::MemoryState::Static:
case Svc::MemoryState::ThreadLocal:
return m_kernel_map_region_start;
case KMemoryState::Io:
case KMemoryState::Shared:
case KMemoryState::AliasCode:
case KMemoryState::AliasCodeData:
case KMemoryState::Transfered:
case KMemoryState::SharedTransfered:
case KMemoryState::SharedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
case Svc::MemoryState::Io:
case Svc::MemoryState::Shared:
case Svc::MemoryState::AliasCode:
case Svc::MemoryState::AliasCodeData:
case Svc::MemoryState::Transfered:
case Svc::MemoryState::SharedTransfered:
case Svc::MemoryState::SharedCode:
case Svc::MemoryState::GeneratedCode:
case Svc::MemoryState::CodeOut:
case Svc::MemoryState::Coverage:
case Svc::MemoryState::Insecure:
return m_alias_code_region_start;
case KMemoryState::Code:
case KMemoryState::CodeData:
case Svc::MemoryState::Code:
case Svc::MemoryState::CodeData:
return m_code_region_start;
default:
UNREACHABLE();
}
}
size_t KPageTable::GetRegionSize(KMemoryState state) const {
size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
case Svc::MemoryState::Free:
case Svc::MemoryState::Kernel:
return m_address_space_end - m_address_space_start;
case KMemoryState::Normal:
case Svc::MemoryState::Normal:
return m_heap_region_end - m_heap_region_start;
case KMemoryState::Ipc:
case KMemoryState::NonSecureIpc:
case KMemoryState::NonDeviceIpc:
case Svc::MemoryState::Ipc:
case Svc::MemoryState::NonSecureIpc:
case Svc::MemoryState::NonDeviceIpc:
return m_alias_region_end - m_alias_region_start;
case KMemoryState::Stack:
case Svc::MemoryState::Stack:
return m_stack_region_end - m_stack_region_start;
case KMemoryState::Static:
case KMemoryState::ThreadLocal:
case Svc::MemoryState::Static:
case Svc::MemoryState::ThreadLocal:
return m_kernel_map_region_end - m_kernel_map_region_start;
case KMemoryState::Io:
case KMemoryState::Shared:
case KMemoryState::AliasCode:
case KMemoryState::AliasCodeData:
case KMemoryState::Transfered:
case KMemoryState::SharedTransfered:
case KMemoryState::SharedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
case Svc::MemoryState::Io:
case Svc::MemoryState::Shared:
case Svc::MemoryState::AliasCode:
case Svc::MemoryState::AliasCodeData:
case Svc::MemoryState::Transfered:
case Svc::MemoryState::SharedTransfered:
case Svc::MemoryState::SharedCode:
case Svc::MemoryState::GeneratedCode:
case Svc::MemoryState::CodeOut:
case Svc::MemoryState::Coverage:
case Svc::MemoryState::Insecure:
return m_alias_code_region_end - m_alias_code_region_start;
case KMemoryState::Code:
case KMemoryState::CodeData:
case Svc::MemoryState::Code:
case Svc::MemoryState::CodeData:
return m_code_region_end - m_code_region_start;
default:
UNREACHABLE();
}
}
bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
const KProcessAddress end = addr + size;
const KProcessAddress last = end - 1;
@@ -3192,32 +3219,32 @@ bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState stat
const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
m_alias_region_start == m_alias_region_end);
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
case Svc::MemoryState::Free:
case Svc::MemoryState::Kernel:
return is_in_region;
case KMemoryState::Io:
case KMemoryState::Static:
case KMemoryState::Code:
case KMemoryState::CodeData:
case KMemoryState::Shared:
case KMemoryState::AliasCode:
case KMemoryState::AliasCodeData:
case KMemoryState::Stack:
case KMemoryState::ThreadLocal:
case KMemoryState::Transfered:
case KMemoryState::SharedTransfered:
case KMemoryState::SharedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
case Svc::MemoryState::Io:
case Svc::MemoryState::Static:
case Svc::MemoryState::Code:
case Svc::MemoryState::CodeData:
case Svc::MemoryState::Shared:
case Svc::MemoryState::AliasCode:
case Svc::MemoryState::AliasCodeData:
case Svc::MemoryState::Stack:
case Svc::MemoryState::ThreadLocal:
case Svc::MemoryState::Transfered:
case Svc::MemoryState::SharedTransfered:
case Svc::MemoryState::SharedCode:
case Svc::MemoryState::GeneratedCode:
case Svc::MemoryState::CodeOut:
case Svc::MemoryState::Coverage:
case Svc::MemoryState::Insecure:
return is_in_region && !is_in_heap && !is_in_alias;
case KMemoryState::Normal:
case Svc::MemoryState::Normal:
ASSERT(is_in_heap);
return is_in_region && !is_in_alias;
case KMemoryState::Ipc:
case KMemoryState::NonSecureIpc:
case KMemoryState::NonDeviceIpc:
case Svc::MemoryState::Ipc:
case Svc::MemoryState::NonSecureIpc:
case Svc::MemoryState::NonDeviceIpc:
ASSERT(is_in_alias);
return is_in_region && !is_in_heap;
default:
@@ -3281,21 +3308,16 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProces
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryBlockManager::const_iterator it,
KProcessAddress last_addr, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
// If the start address isn't aligned, we need a block.
const size_t blocks_for_start_align =
(Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
// Validate all blocks in the range have correct state.
const KMemoryState first_state = info.m_state;
const KMemoryPermission first_perm = info.m_permission;
@@ -3321,10 +3343,6 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
info = it->GetMemoryInfo();
}
// If the end address isn't aligned, we need a block.
const size_t blocks_for_end_align =
(Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
// Write output state.
if (out_state != nullptr) {
*out_state = first_state;
@@ -3335,9 +3353,39 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
if (out_attr != nullptr) {
*out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
}
// If the end address isn't aligned, we need a block.
if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
const size_t blocks_for_end_align =
(Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
? 1
: 0;
*out_blocks_needed = blocks_for_end_align;
}
R_SUCCEED();
}
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Check memory state.
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
// If the start address isn't aligned, we need a block.
if (out_blocks_needed != nullptr &&
Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
++(*out_blocks_needed);
}
R_SUCCEED();
}
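
// Note (not upstream commentary): the iterator-based overload now counts only
// the block needed for an unaligned end address; the (addr, size) wrapper
// above re-finds the start block and adds one more needed block when the
// start address is unaligned, preserving the old overall count.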

View File

@@ -126,8 +126,6 @@ public:
return m_block_info_manager;
}
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, KProcessAddress region_start,
size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@@ -162,6 +160,21 @@ public:
void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
const KPageGroup& pg);
KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
size_t GetRegionSize(Svc::MemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
KProcessAddress GetRegionAddress(KMemoryState state) const {
return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}
size_t GetRegionSize(KMemoryState state) const {
return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
return this->CanContain(addr, size,
static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}
protected:
struct PageLinkedList {
private:
@@ -204,12 +217,13 @@ protected:
private:
enum class OperationType : u32 {
Map = 0,
MapFirst = 1,
MapGroup = 2,
MapGroup = 1,
MapFirstGroup = 2,
Unmap = 3,
ChangePermissions = 4,
ChangePermissionsAndRefresh = 5,
Separate = 6,
ChangePermissionsAndRefreshAndFlush = 6,
Separate = 7,
};
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -228,8 +242,6 @@ private:
Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
OperationType operation, KPhysicalAddress map_addr = 0);
void FinalizeUpdate(PageLinkedList* page_list);
KProcessAddress GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
size_t num_pages, size_t alignment, size_t offset,
@@ -250,6 +262,13 @@ private:
Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KProcessAddress addr, size_t size, KMemoryState state_mask,

View File

@@ -623,14 +623,33 @@ struct KernelCore::Impl {
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
// Insert a physical region for the secure applet memory.
const auto secure_applet_end_phys_addr =
slab_end_phys_addr + KSystemControl::SecureAppletMemorySize;
if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize,
KMemoryRegionType_DramKernelSecureAppletMemory));
}
// Insert a physical region for the unknown debug2 region.
constexpr size_t SecureUnknownRegionSize = 0;
const size_t secure_unknown_size = SecureUnknownRegionSize;
const auto secure_unknown_end_phys_addr = secure_applet_end_phys_addr + secure_unknown_size;
if constexpr (SecureUnknownRegionSize > 0) {
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
GetInteger(secure_applet_end_phys_addr), secure_unknown_size,
KMemoryRegionType_DramKernelSecureUnknown));
}
// Determine size available for kernel page table heaps, requiring > 8 MB.
const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
const size_t page_table_heap_size = resource_end_phys_addr - secure_unknown_end_phys_addr;
ASSERT(page_table_heap_size / 4_MiB > 2);
// Insert a physical region for the kernel page table heap region
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
GetInteger(slab_end_phys_addr), page_table_heap_size,
GetInteger(secure_unknown_end_phys_addr), page_table_heap_size,
KMemoryRegionType_DramKernelPtHeap));
// All DRAM regions that we haven't tagged by this point will be mapped under the linear

View File

@@ -108,10 +108,16 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
// Validate the attribute and mask.
constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
constexpr u32 SupportedMask =
static_cast<u32>(MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked);
R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
// Check that permission locked is either being set or not masked.
R_UNLESS((static_cast<Svc::MemoryAttribute>(mask) & Svc::MemoryAttribute::PermissionLocked) ==
(static_cast<Svc::MemoryAttribute>(attr) & Svc::MemoryAttribute::PermissionLocked),
ResultInvalidCombination);
// Validate that the region is in range for the current process.
auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()};
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
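The two subset checks above read as set algebra: (mask | attr) == mask enforces attr ⊆ mask, and (mask | attr | SupportedMask) == SupportedMask enforces mask ⊆ SupportedMask; the new check then forbids clearing PermissionLocked once it is set. A self-contained sketch of the same validation (the function name is illustrative; the bit values match the MemoryAttribute enum in the next hunk):

#include <cstdint>

// Mirrors the SVC validation above: Uncached = (1 << 3), PermissionLocked = (1 << 4).
constexpr bool IsValidAttributeRequest(uint32_t mask, uint32_t attr) {
    constexpr uint32_t Uncached = 1u << 3;
    constexpr uint32_t PermissionLocked = 1u << 4;
    constexpr uint32_t SupportedMask = Uncached | PermissionLocked;
    if ((mask | attr) != mask) {
        return false; // attr must be a subset of mask
    }
    if ((mask | attr | SupportedMask) != SupportedMask) {
        return false; // mask may only contain supported bits
    }
    // PermissionLocked may be set but never cleared: if it is masked, it must
    // also be present in the attribute value.
    return (mask & PermissionLocked) == (attr & PermissionLocked);
}

static_assert(IsValidAttributeRequest(1u << 3, 1u << 3));  // setting Uncached: ok
static_assert(IsValidAttributeRequest(1u << 4, 1u << 4));  // setting PermissionLocked: ok
static_assert(!IsValidAttributeRequest(1u << 4, 0));       // clearing PermissionLocked: rejected
static_assert(!IsValidAttributeRequest(1u << 5, 1u << 5)); // unsupported bit: rejected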

View File

@@ -46,6 +46,7 @@ enum class MemoryAttribute : u32 {
IpcLocked = (1 << 1),
DeviceShared = (1 << 2),
Uncached = (1 << 3),
PermissionLocked = (1 << 4),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
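DECLARE_ENUM_FLAG_OPERATORS gives the scoped enum the usual bitwise operators, so the new bit can be combined and tested without explicit casts. A self-contained sketch, with hand-written stand-ins for what the macro presumably generates:

#include <cstdint>

enum class MemoryAttribute : uint32_t {
    Uncached = (1 << 3),
    PermissionLocked = (1 << 4),
};

// Stand-ins for operators assumed to come from DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute).
constexpr MemoryAttribute operator|(MemoryAttribute a, MemoryAttribute b) {
    return static_cast<MemoryAttribute>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
}
constexpr MemoryAttribute operator&(MemoryAttribute a, MemoryAttribute b) {
    return static_cast<MemoryAttribute>(static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
}

constexpr MemoryAttribute attr = MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked;
static_assert((attr & MemoryAttribute::PermissionLocked) == MemoryAttribute::PermissionLocked);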

View File

@@ -46,7 +46,7 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
// Get bounds of where mapping is possible.
const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
const auto state = Kernel::KMemoryState::Io;
const auto state = Kernel::KMemoryState::IoMemory;
const auto perm = Kernel::KMemoryPermission::UserReadWrite;
std::mt19937_64 rng{process->GetRandomEntropy(0)};

View File

@@ -86,7 +86,10 @@ public:
uncommitted_operations.emplace_back(std::move(func));
}
pending_operations.emplace_back(std::move(uncommitted_operations));
QueueFence(new_fence);
{
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
QueueFence(new_fence);
}
if (!delay_fence) {
func();
}
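Taking both cache mutexes through one std::scoped_lock before queueing the fence matters: scoped_lock acquires its mutexes with a deadlock-avoidance algorithm (as if by std::lock), so this thread cannot deadlock against another thread that takes the same pair in a different order. A freestanding illustration:

#include <mutex>
#include <thread>

std::recursive_mutex buffer_mutex;
std::recursive_mutex texture_mutex;

void ThreadA() {
    // Both mutexes are acquired atomically, with deadlock avoidance...
    std::scoped_lock lock{buffer_mutex, texture_mutex};
    // ... critical section ...
}

void ThreadB() {
    // ...so listing them in the opposite order here is still safe.
    std::scoped_lock lock{texture_mutex, buffer_mutex};
}

int main() {
    std::thread a{ThreadA};
    std::thread b{ThreadB};
    a.join();
    b.join();
}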

View File

@@ -132,12 +132,16 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
const bool use_accelerated =
rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
const bool is_srgb = use_accelerated && screen_info.is_srgb;
RenderScreenshot(*framebuffer, use_accelerated);
Frame* frame = present_manager.GetRenderFrame();
blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
scheduler.Flush(*frame->render_ready);
present_manager.Present(frame);
{
std::scoped_lock lock{rasterizer.LockCaches()};
RenderScreenshot(*framebuffer, use_accelerated);
Frame* frame = present_manager.GetRenderFrame();
blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
scheduler.Flush(*frame->render_ready);
present_manager.Present(frame);
}
gpu.RendererFrameEndNotify();
rasterizer.TickFrame();

View File

@@ -198,7 +198,7 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
if (!pipeline) {
return;
}
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
std::scoped_lock lock{LockCaches()};
// update engine as channel may be different.
pipeline->SetEngine(maxwell3d, gpu_memory);
pipeline->Configure(is_indexed);
@@ -708,6 +708,7 @@ void RasterizerVulkan::TiledCacheBarrier() {
}
void RasterizerVulkan::FlushCommands() {
std::scoped_lock lock{LockCaches()};
if (draw_counter == 0) {
return;
}
@@ -805,6 +806,7 @@ void RasterizerVulkan::FlushWork() {
if ((++draw_counter & 7) != 7) {
return;
}
std::scoped_lock lock{LockCaches()};
if (draw_counter < DRAWS_TO_DISPATCH) {
// Send recorded tasks to the worker thread
scheduler.DispatchWork();
@@ -1499,7 +1501,7 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
CreateChannel(channel);
{
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
std::scoped_lock lock{LockCaches()};
texture_cache.CreateChannel(channel);
buffer_cache.CreateChannel(channel);
}
@@ -1512,7 +1514,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
const s32 channel_id = channel.bind_id;
BindToChannel(channel_id);
{
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
std::scoped_lock lock{LockCaches()};
texture_cache.BindToChannel(channel_id);
buffer_cache.BindToChannel(channel_id);
}
@@ -1525,7 +1527,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
EraseChannel(channel_id);
{
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
std::scoped_lock lock{LockCaches()};
texture_cache.EraseChannel(channel_id);
buffer_cache.EraseChannel(channel_id);
}

View File

@@ -133,6 +133,10 @@ public:
void ReleaseChannel(s32 channel_id) override;
std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockCaches() {
return std::scoped_lock{buffer_cache.mutex, texture_cache.mutex};
}
private:
static constexpr size_t MAX_TEXTURES = 192;
static constexpr size_t MAX_IMAGES = 48;
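Note that LockCaches() returns a std::scoped_lock by value even though scoped_lock is neither copyable nor movable; this is legal because C++17 guarantees copy elision for the returned prvalue and at the call site, which is also why the std::scoped_lock lock{LockCaches()}; spelling in the .cpp hunks compiles. A reduced sketch of the pattern (names are illustrative):

#include <mutex>

class Caches {
public:
    // Returning the lock by value works: the prvalue is materialized directly
    // in the caller's storage (guaranteed copy elision, C++17).
    std::scoped_lock<std::recursive_mutex, std::recursive_mutex> Lock() {
        return std::scoped_lock{buffer_mutex, texture_mutex};
    }

private:
    std::recursive_mutex buffer_mutex;
    std::recursive_mutex texture_mutex;
};

int main() {
    Caches caches;
    // No copy or move is performed; the lock guards until end of scope.
    auto lock = caches.Lock();
}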

View File

@@ -227,14 +227,14 @@ add_executable(yuzu
yuzu.rc
)
if (WIN32 AND YUZU_CRASH_DUMPS)
if (YUZU_CRASH_DUMPS)
target_sources(yuzu PRIVATE
mini_dump.cpp
mini_dump.h
breakpad.cpp
breakpad.h
)
target_link_libraries(yuzu PRIVATE ${DBGHELP_LIBRARY})
target_compile_definitions(yuzu PRIVATE -DYUZU_DBGHELP)
target_link_libraries(yuzu PRIVATE libbreakpad_client)
target_compile_definitions(yuzu PRIVATE YUZU_CRASH_DUMPS)
endif()
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")

77 src/yuzu/breakpad.cpp Normal file
View File

@@ -0,0 +1,77 @@
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <ranges>
#if defined(_WIN32)
#include <client/windows/handler/exception_handler.h>
#elif defined(__linux__)
#include <client/linux/handler/exception_handler.h>
#else
#error Minidump creation not supported on this platform
#endif
#include "common/fs/fs_paths.h"
#include "common/fs/path_util.h"
#include "yuzu/breakpad.h"
namespace Breakpad {
static void PruneDumpDirectory(const std::filesystem::path& dump_path) {
// Code in this function should be exception-safe.
struct Entry {
std::filesystem::path path;
std::filesystem::file_time_type last_write_time;
};
std::vector<Entry> existing_dumps;
// Get existing entries.
std::error_code ec;
std::filesystem::directory_iterator dir(dump_path, ec);
for (auto& entry : dir) {
if (entry.is_regular_file()) {
existing_dumps.push_back(Entry{
.path = entry.path(),
.last_write_time = entry.last_write_time(ec),
});
}
}
// Sort descending by last write time (newest first).
std::ranges::stable_sort(existing_dumps, [](const auto& a, const auto& b) {
return a.last_write_time > b.last_write_time;
});
// Delete all but the five most recent dumps.
for (size_t i = 5; i < existing_dumps.size(); i++) {
std::filesystem::remove(existing_dumps[i].path, ec);
}
}
#if defined(__linux__)
[[noreturn]] bool DumpCallback(const google_breakpad::MinidumpDescriptor& descriptor, void* context,
bool succeeded) {
// Prevent time- and space-consuming core dumps from being generated, as we have
// already generated a minidump and a core file will not be useful anyway.
_exit(1);
}
#endif
void InstallCrashHandler() {
// Write crash dumps to profile directory.
const auto dump_path = GetYuzuPath(Common::FS::YuzuPath::CrashDumpsDir);
PruneDumpDirectory(dump_path);
#if defined(_WIN32)
// TODO: If we switch to MinGW builds for Windows, this needs to be wrapped in a C API.
static google_breakpad::ExceptionHandler eh{dump_path, nullptr, nullptr, nullptr,
google_breakpad::ExceptionHandler::HANDLER_ALL};
#elif defined(__linux__)
static google_breakpad::MinidumpDescriptor descriptor{dump_path};
static google_breakpad::ExceptionHandler eh{descriptor, nullptr, DumpCallback,
nullptr, true, -1};
#endif
}
} // namespace Breakpad
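From the frontend's point of view, installation is a one-liner; the main() change later in this diff boils down to the following sketch (unrelated startup elided):

#ifdef YUZU_CRASH_DUMPS
#include "yuzu/breakpad.h"
#endif

int main(int argc, char* argv[]) {
    // ... startup checks elided ...
#ifdef YUZU_CRASH_DUMPS
    // In-process Breakpad handler; replaces the old out-of-process
    // debugger approach from mini_dump.cpp.
    Breakpad::InstallCrashHandler();
#endif
    // ... rest of frontend startup ...
    return 0;
}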

10 src/yuzu/breakpad.h Normal file
View File

@@ -0,0 +1,10 @@
// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
namespace Breakpad {
void InstallCrashHandler();
}

View File

@@ -27,16 +27,6 @@ ConfigureDebug::ConfigureDebug(const Core::System& system_, QWidget* parent)
connect(ui->toggle_gdbstub, &QCheckBox::toggled,
[&]() { ui->gdbport_spinbox->setEnabled(ui->toggle_gdbstub->isChecked()); });
connect(ui->create_crash_dumps, &QCheckBox::stateChanged, [&](int) {
if (crash_dump_warning_shown) {
return;
}
QMessageBox::warning(this, tr("Restart Required"),
tr("yuzu is required to restart in order to apply this setting."),
QMessageBox::Ok, QMessageBox::Ok);
crash_dump_warning_shown = true;
});
}
ConfigureDebug::~ConfigureDebug() = default;
@@ -89,13 +79,6 @@ void ConfigureDebug::SetConfiguration() {
ui->disable_web_applet->setEnabled(false);
ui->disable_web_applet->setText(tr("Web applet not compiled"));
#endif
#ifdef YUZU_DBGHELP
ui->create_crash_dumps->setChecked(Settings::values.create_crash_dumps.GetValue());
#else
ui->create_crash_dumps->setEnabled(false);
ui->create_crash_dumps->setText(tr("MiniDump creation not compiled"));
#endif
}
void ConfigureDebug::ApplyConfiguration() {
@@ -107,7 +90,6 @@ void ConfigureDebug::ApplyConfiguration() {
Settings::values.enable_fs_access_log = ui->fs_access_log->isChecked();
Settings::values.reporting_services = ui->reporting_services->isChecked();
Settings::values.dump_audio_commands = ui->dump_audio_commands->isChecked();
Settings::values.create_crash_dumps = ui->create_crash_dumps->isChecked();
Settings::values.quest_flag = ui->quest_flag->isChecked();
Settings::values.use_debug_asserts = ui->use_debug_asserts->isChecked();
Settings::values.use_auto_stub = ui->use_auto_stub->isChecked();

View File

@@ -471,13 +471,6 @@
</property>
</widget>
</item>
<item row="4" column="0">
<widget class="QCheckBox" name="create_crash_dumps">
<property name="text">
<string>Create Minidump After Crash</string>
</property>
</widget>
</item>
<item row="3" column="0">
<widget class="QCheckBox" name="dump_audio_commands">
<property name="toolTip">

View File

@@ -159,8 +159,8 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
#include "yuzu/util/clickable_label.h"
#include "yuzu/vk_device_info.h"
#ifdef YUZU_DBGHELP
#include "yuzu/mini_dump.h"
#ifdef YUZU_CRASH_DUMPS
#include "yuzu/breakpad.h"
#endif
using namespace Common::Literals;
@@ -5187,22 +5187,15 @@ int main(int argc, char* argv[]) {
return 0;
}
#ifdef YUZU_DBGHELP
PROCESS_INFORMATION pi;
if (!is_child && Settings::values.create_crash_dumps.GetValue() &&
MiniDump::SpawnDebuggee(argv[0], pi)) {
// Delete the config object so that it doesn't save when the program exits
config.reset(nullptr);
MiniDump::DebugDebuggee(pi);
return 0;
}
#endif
if (StartupChecks(argv[0], &has_broken_vulkan,
Settings::values.perform_vulkan_check.GetValue())) {
return 0;
}
#ifdef YUZU_CRASH_DUMPS
Breakpad::InstallCrashHandler();
#endif
Common::DetachedTasks detached_tasks;
MicroProfileOnThreadCreate("Frontend");
SCOPE_EXIT({ MicroProfileShutdown(); });

View File

@@ -1,202 +0,0 @@
// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <cstdio>
#include <cstring>
#include <ctime>
#include <filesystem>
#include <fmt/format.h>
#include <windows.h>
#include "yuzu/mini_dump.h"
#include "yuzu/startup_checks.h"
// dbghelp.h must be included after windows.h
#include <dbghelp.h>
namespace MiniDump {
void CreateMiniDump(HANDLE process_handle, DWORD process_id, MINIDUMP_EXCEPTION_INFORMATION* info,
EXCEPTION_POINTERS* pep) {
char file_name[255];
const std::time_t the_time = std::time(nullptr);
std::strftime(file_name, 255, "yuzu-crash-%Y%m%d%H%M%S.dmp", std::localtime(&the_time));
// Open the file
HANDLE file_handle = CreateFileA(file_name, GENERIC_READ | GENERIC_WRITE, 0, nullptr,
CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
if (file_handle == nullptr || file_handle == INVALID_HANDLE_VALUE) {
fmt::print(stderr, "CreateFileA failed. Error: {}", GetLastError());
return;
}
// Create the minidump
const MINIDUMP_TYPE dump_type = MiniDumpNormal;
const bool write_dump_status = MiniDumpWriteDump(process_handle, process_id, file_handle,
dump_type, (pep != 0) ? info : 0, 0, 0);
if (write_dump_status) {
fmt::print(stderr, "MiniDump created: {}", file_name);
} else {
fmt::print(stderr, "MiniDumpWriteDump failed. Error: {}", GetLastError());
}
// Close the file
CloseHandle(file_handle);
}
void DumpFromDebugEvent(DEBUG_EVENT& deb_ev, PROCESS_INFORMATION& pi) {
EXCEPTION_RECORD& record = deb_ev.u.Exception.ExceptionRecord;
HANDLE thread_handle = OpenThread(THREAD_GET_CONTEXT, false, deb_ev.dwThreadId);
if (thread_handle == nullptr) {
fmt::print(stderr, "OpenThread failed ({})", GetLastError());
return;
}
// Get child process context
CONTEXT context = {};
context.ContextFlags = CONTEXT_ALL;
if (!GetThreadContext(thread_handle, &context)) {
fmt::print(stderr, "GetThreadContext failed ({})", GetLastError());
return;
}
// Create exception pointers for minidump
EXCEPTION_POINTERS ep;
ep.ExceptionRecord = &record;
ep.ContextRecord = &context;
MINIDUMP_EXCEPTION_INFORMATION info;
info.ThreadId = deb_ev.dwThreadId;
info.ExceptionPointers = &ep;
info.ClientPointers = false;
CreateMiniDump(pi.hProcess, pi.dwProcessId, &info, &ep);
if (CloseHandle(thread_handle) == 0) {
fmt::print(stderr, "error: CloseHandle(thread_handle) failed ({})", GetLastError());
}
}
bool SpawnDebuggee(const char* arg0, PROCESS_INFORMATION& pi) {
std::memset(&pi, 0, sizeof(pi));
// Don't debug if we are already being debugged
if (IsDebuggerPresent()) {
return false;
}
if (!SpawnChild(arg0, &pi, 0)) {
fmt::print(stderr, "warning: continuing without crash dumps");
return false;
}
const bool can_debug = DebugActiveProcess(pi.dwProcessId);
if (!can_debug) {
fmt::print(stderr,
"warning: DebugActiveProcess failed ({}), continuing without crash dumps",
GetLastError());
return false;
}
return true;
}
static const char* ExceptionName(DWORD exception) {
switch (exception) {
case EXCEPTION_ACCESS_VIOLATION:
return "EXCEPTION_ACCESS_VIOLATION";
case EXCEPTION_DATATYPE_MISALIGNMENT:
return "EXCEPTION_DATATYPE_MISALIGNMENT";
case EXCEPTION_BREAKPOINT:
return "EXCEPTION_BREAKPOINT";
case EXCEPTION_SINGLE_STEP:
return "EXCEPTION_SINGLE_STEP";
case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
return "EXCEPTION_ARRAY_BOUNDS_EXCEEDED";
case EXCEPTION_FLT_DENORMAL_OPERAND:
return "EXCEPTION_FLT_DENORMAL_OPERAND";
case EXCEPTION_FLT_DIVIDE_BY_ZERO:
return "EXCEPTION_FLT_DIVIDE_BY_ZERO";
case EXCEPTION_FLT_INEXACT_RESULT:
return "EXCEPTION_FLT_INEXACT_RESULT";
case EXCEPTION_FLT_INVALID_OPERATION:
return "EXCEPTION_FLT_INVALID_OPERATION";
case EXCEPTION_FLT_OVERFLOW:
return "EXCEPTION_FLT_OVERFLOW";
case EXCEPTION_FLT_STACK_CHECK:
return "EXCEPTION_FLT_STACK_CHECK";
case EXCEPTION_FLT_UNDERFLOW:
return "EXCEPTION_FLT_UNDERFLOW";
case EXCEPTION_INT_DIVIDE_BY_ZERO:
return "EXCEPTION_INT_DIVIDE_BY_ZERO";
case EXCEPTION_INT_OVERFLOW:
return "EXCEPTION_INT_OVERFLOW";
case EXCEPTION_PRIV_INSTRUCTION:
return "EXCEPTION_PRIV_INSTRUCTION";
case EXCEPTION_IN_PAGE_ERROR:
return "EXCEPTION_IN_PAGE_ERROR";
case EXCEPTION_ILLEGAL_INSTRUCTION:
return "EXCEPTION_ILLEGAL_INSTRUCTION";
case EXCEPTION_NONCONTINUABLE_EXCEPTION:
return "EXCEPTION_NONCONTINUABLE_EXCEPTION";
case EXCEPTION_STACK_OVERFLOW:
return "EXCEPTION_STACK_OVERFLOW";
case EXCEPTION_INVALID_DISPOSITION:
return "EXCEPTION_INVALID_DISPOSITION";
case EXCEPTION_GUARD_PAGE:
return "EXCEPTION_GUARD_PAGE";
case EXCEPTION_INVALID_HANDLE:
return "EXCEPTION_INVALID_HANDLE";
default:
return "unknown exception type";
}
}
void DebugDebuggee(PROCESS_INFORMATION& pi) {
DEBUG_EVENT deb_ev = {};
while (deb_ev.dwDebugEventCode != EXIT_PROCESS_DEBUG_EVENT) {
const bool wait_success = WaitForDebugEvent(&deb_ev, INFINITE);
if (!wait_success) {
fmt::print(stderr, "error: WaitForDebugEvent failed ({})", GetLastError());
return;
}
switch (deb_ev.dwDebugEventCode) {
case OUTPUT_DEBUG_STRING_EVENT:
case CREATE_PROCESS_DEBUG_EVENT:
case CREATE_THREAD_DEBUG_EVENT:
case EXIT_PROCESS_DEBUG_EVENT:
case EXIT_THREAD_DEBUG_EVENT:
case LOAD_DLL_DEBUG_EVENT:
case RIP_EVENT:
case UNLOAD_DLL_DEBUG_EVENT:
// Continue on all other debug events
ContinueDebugEvent(deb_ev.dwProcessId, deb_ev.dwThreadId, DBG_CONTINUE);
break;
case EXCEPTION_DEBUG_EVENT:
EXCEPTION_RECORD& record = deb_ev.u.Exception.ExceptionRecord;
// We want to generate a crash dump if we are seeing the same exception again.
if (!deb_ev.u.Exception.dwFirstChance) {
fmt::print(stderr, "Creating MiniDump on ExceptionCode: 0x{:08x} {}\n",
record.ExceptionCode, ExceptionName(record.ExceptionCode));
DumpFromDebugEvent(deb_ev, pi);
}
// Continue without handling the exception.
// Lets the debuggee use its own exception handler.
// - If one does not exist, we will see the exception once more where we make a minidump
// for. Then when it reaches here again, yuzu will probably crash.
// - DBG_CONTINUE on an exception that the debuggee does not handle can set us up for an
// infinite loop of exceptions.
ContinueDebugEvent(deb_ev.dwProcessId, deb_ev.dwThreadId, DBG_EXCEPTION_NOT_HANDLED);
break;
}
}
}
} // namespace MiniDump

View File

@@ -1,19 +0,0 @@
// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <windows.h>
#include <dbghelp.h>
namespace MiniDump {
void CreateMiniDump(HANDLE process_handle, DWORD process_id, MINIDUMP_EXCEPTION_INFORMATION* info,
EXCEPTION_POINTERS* pep);
void DumpFromDebugEvent(DEBUG_EVENT& deb_ev, PROCESS_INFORMATION& pi);
bool SpawnDebuggee(const char* arg0, PROCESS_INFORMATION& pi);
void DebugDebuggee(PROCESS_INFORMATION& pi);
} // namespace MiniDump

View File

@@ -33,10 +33,6 @@
"description": "Compile tests",
"dependencies": [ "catch2" ]
},
"dbghelp": {
"description": "Compile Windows crash dump (Minidump) support",
"dependencies": [ "dbghelp" ]
},
"web-service": {
"description": "Enable web services (telemetry, etc.)",
"dependencies": [