Compare commits
android-11...android-10: 1 commit

| Author | SHA1 | Date |
|---|---|---|
| | da3849fe82 | |
@@ -3,4 +3,4 @@
[codespell]
skip = ./.git,./build,./dist,./Doxyfile,./externals,./LICENSES,./src/android/app/src/main/res
ignore-words-list = aci,allright,ba,canonicalizations,deques,froms,hda,inout,lod,masia,nam,nax,nd,optin,pullrequests,pullrequest,te,transfered,unstall,uscaled,vas,zink
ignore-words-list = aci,allright,ba,canonicalizations,deques,froms,hda,inout,lod,masia,nam,nax,nd,optin,pullrequests,pullrequest,te,transfered,unstall,uscaled,zink
@@ -147,7 +147,3 @@ License: GPL-3.0-or-later
Files: src/android/gradle/wrapper/*
Copyright: 2023 yuzu Emulator Project
License: GPL-3.0-or-later

Files: externals/stb/*
Copyright: Sean Barrett
License: MIT
@@ -294,7 +294,6 @@ find_package(lz4 REQUIRED)
find_package(nlohmann_json 3.8 REQUIRED)
find_package(Opus 1.3 MODULE)
find_package(RenderDoc MODULE)
find_package(stb MODULE)
find_package(VulkanMemoryAllocator CONFIG)
find_package(ZLIB 1.2 REQUIRED)
find_package(zstd 1.5 REQUIRED)
@@ -1,31 +0,0 @@
# SPDX-FileCopyrightText: 2023 Alexandre Bouvier <contact@amb.tf>
#
# SPDX-License-Identifier: GPL-3.0-or-later

find_path(stb_image_INCLUDE_DIR stb_image.h PATH_SUFFIXES stb)
find_path(stb_image_resize_INCLUDE_DIR stb_image_resize.h PATH_SUFFIXES stb)
find_path(stb_image_write_INCLUDE_DIR stb_image_write.h PATH_SUFFIXES stb)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(stb
REQUIRED_VARS
stb_image_INCLUDE_DIR
stb_image_resize_INCLUDE_DIR
stb_image_write_INCLUDE_DIR
)

if (stb_FOUND AND NOT TARGET stb::headers)
add_library(stb::headers INTERFACE IMPORTED)
set_property(TARGET stb::headers PROPERTY
INTERFACE_INCLUDE_DIRECTORIES
"${stb_image_INCLUDE_DIR}"
"${stb_image_resize_INCLUDE_DIR}"
"${stb_image_write_INCLUDE_DIR}"
)
endif()

mark_as_advanced(
stb_image_INCLUDE_DIR
stb_image_resize_INCLUDE_DIR
stb_image_write_INCLUDE_DIR
)
@@ -1,6 +1,5 @@
| Pull Request | Commit | Title | Author | Merged? |
|----|----|----|----|----|
| [11910](https://github.com/yuzu-emu/yuzu//pull/11910) | [`8427b9d49`](https://github.com/yuzu-emu/yuzu//pull/11910/files) | renderer_vulkan: ensure exception on surface loss | [liamwhite](https://github.com/liamwhite/) | Yes |

End of merge log. You can find the original README.md below the break.
dist/org.yuzu_emu.yuzu.desktop (vendored, 1 changed line)
@@ -13,4 +13,3 @@ Exec=yuzu %f
Categories=Game;Emulator;Qt;
MimeType=application/x-nx-nro;application/x-nx-nso;application/x-nx-nsp;application/x-nx-xci;
Keywords=Nintendo;Switch;
StartupWMClass=yuzu
externals/CMakeLists.txt (vendored, 6 changed lines)
@@ -168,13 +168,9 @@ if (NOT TARGET LLVM::Demangle)
add_library(LLVM::Demangle ALIAS demangle)
endif()

add_library(stb stb/stb_dxt.cpp)
add_library(stb stb/stb_dxt.cpp stb/stb_image.cpp stb/stb_image_resize.cpp)
target_include_directories(stb PUBLIC ./stb)

if (NOT TARGET stb::headers)
add_library(stb::headers ALIAS stb)
endif()

add_library(bc_decoder bc_decoder/bc_decoder.cpp)
target_include_directories(bc_decoder PUBLIC ./bc_decoder)
externals/stb/stb_image.cpp (vendored, new file, 7529 lines): diff suppressed because it is too large
externals/stb/stb_image.h (vendored, 7221 lines): diff suppressed because it is too large
externals/stb/stb_image_resize.cpp (vendored, new file, 2282 lines): diff suppressed because it is too large
externals/stb/stb_image_resize.h (vendored, 2214 lines): diff suppressed because it is too large
externals/stb/stb_image_write.h (vendored, 1724 lines): diff suppressed because it is too large
@@ -26,7 +26,7 @@ import androidx.fragment.app.Fragment
import androidx.fragment.app.activityViewModels
import androidx.navigation.findNavController
import androidx.navigation.fragment.findNavController
import androidx.recyclerview.widget.GridLayoutManager
import androidx.recyclerview.widget.LinearLayoutManager
import com.google.android.material.transition.MaterialSharedAxis
import org.yuzu.yuzu_emu.BuildConfig
import org.yuzu.yuzu_emu.HomeNavigationDirections
@@ -186,8 +186,7 @@ class HomeSettingsFragment : Fragment() {
}

binding.homeSettingsList.apply {
layoutManager =
GridLayoutManager(requireContext(), resources.getInteger(R.integer.grid_columns))
layoutManager = LinearLayoutManager(requireContext())
adapter = HomeSettingAdapter(
requireActivity() as AppCompatActivity,
viewLifecycleOwner,
@@ -16,8 +16,7 @@
<LinearLayout
android:id="@+id/option_layout"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_gravity="center_vertical">
android:layout_height="wrap_content">

<ImageView
android:id="@+id/option_icon"
@@ -120,8 +120,6 @@ add_library(common STATIC
socket_types.h
spin_lock.cpp
spin_lock.h
stb.cpp
stb.h
steady_clock.cpp
steady_clock.h
stream.cpp
@@ -210,8 +208,6 @@ if (MSVC)
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4800 # Implicit conversion from 'type' to bool. Possible information loss
)
else()
set_source_files_properties(stb.cpp PROPERTIES COMPILE_OPTIONS "-Wno-implicit-fallthrough;-Wno-missing-declarations;-Wno-missing-field-initializers")
endif()

if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
@@ -227,7 +223,7 @@ endif()

create_target_directory_groups(common)

target_link_libraries(common PUBLIC Boost::context Boost::headers fmt::fmt microprofile stb::headers Threads::Threads)
target_link_libraries(common PUBLIC Boost::context Boost::headers fmt::fmt microprofile Threads::Threads)
target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd LLVM::Demangle)

if (ANDROID)
@@ -25,7 +25,6 @@ void ConfigureNvidiaEnvironmentFlags() {
void(_putenv(fmt::format("__GL_SHADER_DISK_CACHE_PATH={}", windows_path_string).c_str()));
void(_putenv("__GL_SHADER_DISK_CACHE_SKIP_CLEANUP=1"));
void(_putenv("__GL_THREADED_OPTIMIZATIONS=1"));
#endif
}
@@ -1,8 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_RESIZE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION

#include "common/stb.h"
@@ -1,8 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <stb_image.h>
#include <stb_image_resize.h>
#include <stb_image_write.h>
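Note: the two deleted wrappers above follow stb's single-translation-unit convention: every consumer includes the thin common/stb.h header, while exactly one .cpp file defines the STB_*_IMPLEMENTATION macros so the library's function bodies are compiled exactly once. A minimal sketch of a consumer, assuming the wrapper header above; the function name and error handling are illustrative only:

```cpp
#include <cstdio>

#include "common/stb.h"

// stbi_load/stbi_image_free come from stb_image.h via the wrapper; requesting
// 4 channels forces RGBA output regardless of what the file stores.
bool PrintImageSize(const char* path) {
    int width{};
    int height{};
    int channels_in_file{};
    stbi_uc* pixels = stbi_load(path, &width, &height, &channels_in_file, 4);
    if (pixels == nullptr) {
        return false;
    }
    std::printf("%dx%d (%d channels in file)\n", width, height, channels_in_file);
    stbi_image_free(pixels);
    return true;
}
```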
@@ -11,7 +11,6 @@
#include <mach/mach.h>
#elif defined(_WIN32)
#include <windows.h>
#include "common/string_util.h"
#else
#if defined(__Bitrig__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
#include <pthread_np.h>
@@ -83,8 +82,29 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
#ifdef _MSC_VER

// Sets the debugger-visible name of the current thread.
// Uses trick documented in:
// https://docs.microsoft.com/en-us/visualstudio/debugger/how-to-set-a-thread-name-in-native-code
void SetCurrentThreadName(const char* name) {
SetThreadDescription(GetCurrentThread(), UTF8ToUTF16W(name).data());
static const DWORD MS_VC_EXCEPTION = 0x406D1388;

#pragma pack(push, 8)
struct THREADNAME_INFO {
DWORD dwType; // must be 0x1000
LPCSTR szName; // pointer to name (in user addr space)
DWORD dwThreadID; // thread ID (-1=caller thread)
DWORD dwFlags; // reserved for future use, must be zero
} info;
#pragma pack(pop)

info.dwType = 0x1000;
info.szName = name;
info.dwThreadID = std::numeric_limits<DWORD>::max();
info.dwFlags = 0;

__try {
RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
} __except (EXCEPTION_CONTINUE_EXECUTION) {
}
}

#else // !MSVC_VER, so must be POSIX threads
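Note: this hunk swaps SetThreadDescription for the older RaiseException(0x406D1388) protocol. SetThreadDescription only exists on Windows 10 version 1607 and later, while the exception-based trick is understood by debuggers on all Windows versions, which is presumably the motivation; the Microsoft link in the comment documents the THREADNAME_INFO layout. A hypothetical caller (the header path is assumed):

```cpp
#include <thread>

#include "common/thread.h"  // assumed location of Common::SetCurrentThreadName

void StartWorker() {
    std::jthread worker([] {
        // The name shows up in the debugger's thread list while this thread runs.
        Common::SetCurrentThreadName("EmuWorker");
        // ... thread work ...
    });
}
```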
@@ -86,9 +86,9 @@ void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<Backt

std::map<std::string, Symbols::Symbols> symbols;
for (const auto& module : modules) {
symbols.insert_or_assign(module.second,
Symbols::GetSymbols(module.first, system.ApplicationMemory(),
system.ApplicationProcess()->Is64Bit()));
symbols.insert_or_assign(
module.second, Symbols::GetSymbols(module.first, system.ApplicationMemory(),
system.ApplicationProcess()->Is64BitProcess()));
}

for (auto& entry : out) {
@@ -116,8 +116,11 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
}
}

return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(dir->GetName(),
std::move(concat));
if (concat.empty()) {
return nullptr;
}

return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(concat, dir->GetName());
}

if (Common::FS::IsDir(path)) {
@@ -309,10 +312,17 @@ struct System::Impl {

telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);

// Create a resource limit for the process.
const auto physical_memory_size =
kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
auto* resource_limit = Kernel::CreateResourceLimitForProcess(system, physical_memory_size);

// Create the process.
auto main_process = Kernel::KProcess::Create(system.Kernel());
ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
Kernel::KProcess::ProcessType::Userland, resource_limit)
.IsSuccess());
Kernel::KProcess::Register(system.Kernel(), main_process);
kernel.AppendNewProcess(main_process);
kernel.MakeApplicationProcess(main_process);
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
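Note: as reordered above, startup now builds the process's resource limit first, then creates, initializes, and registers the KProcess (KProcess::Create, Initialize, Register, AppendNewProcess, MakeApplicationProcess, in that order), and only afterwards asks the loader to map the application into it.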
@@ -411,7 +421,6 @@ struct System::Impl {
services->KillNVNFlinger();
}
kernel.CloseServices();
kernel.ShutdownCores();
services.reset();
service_manager.reset();
cheat_engine.reset();
@@ -423,6 +432,7 @@ struct System::Impl {
gpu_core.reset();
host1x_core.reset();
perf_stats.reset();
kernel.ShutdownCores();
cpu_manager.Shutdown();
debugger.reset();
kernel.Shutdown();
@@ -258,20 +258,20 @@ private:
Kernel::KScopedSchedulerLock sl{system.Kernel()};

// Put all threads to sleep on next scheduler round.
for (auto& thread : ThreadList()) {
thread.RequestSuspend(Kernel::SuspendType::Debug);
for (auto* thread : ThreadList()) {
thread->RequestSuspend(Kernel::SuspendType::Debug);
}
}

void ResumeEmulation(Kernel::KThread* except = nullptr) {
// Wake up all threads.
for (auto& thread : ThreadList()) {
if (std::addressof(thread) == except) {
for (auto* thread : ThreadList()) {
if (thread == except) {
continue;
}

thread.SetStepState(Kernel::StepState::NotStepping);
thread.Resume(Kernel::SuspendType::Debug);
thread->SetStepState(Kernel::StepState::NotStepping);
thread->Resume(Kernel::SuspendType::Debug);
}
}
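Note: these loop rewrites track a container change visible further down in the hunk: ThreadList() now yields a std::list of Kernel::KThread pointers instead of an intrusive list of KThread objects, so the loop variable is itself a pointer and identity checks compare pointers directly rather than going through std::addressof. A generic sketch of the new shape, with a stand-in Thread type:

```cpp
#include <list>

struct Thread {
    void Suspend() {}
};

void SuspendAllExcept(std::list<Thread*>& threads, const Thread* except) {
    for (auto* thread : threads) {  // elements are already pointers
        if (thread == except) {     // plain pointer comparison, no std::addressof
            continue;
        }
        thread->Suspend();
    }
}
```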
@@ -283,17 +283,13 @@ private:
}

void UpdateActiveThread() {
auto& threads{ThreadList()};
for (auto& thread : threads) {
if (std::addressof(thread) == state->active_thread) {
// Thread is still alive, no need to update.
return;
}
const auto& threads{ThreadList()};
if (std::find(threads.begin(), threads.end(), state->active_thread) == threads.end()) {
state->active_thread = threads.front();
}
state->active_thread = std::addressof(threads.front());
}

Kernel::KProcess::ThreadList& ThreadList() {
const std::list<Kernel::KThread*>& ThreadList() {
return system.ApplicationProcess()->GetThreadList();
}
@@ -109,7 +109,7 @@ static std::string EscapeXML(std::string_view data) {

GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_)
: DebuggerFrontend(backend_), system{system_} {
if (system.ApplicationProcess()->Is64Bit()) {
if (system.ApplicationProcess()->Is64BitProcess()) {
arch = std::make_unique<GDBStubA64>();
} else {
arch = std::make_unique<GDBStubA32>();
@@ -446,10 +446,10 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
// See osdbg_thread_local_region.os.horizon.hpp and osdbg_thread_type.os.horizon.hpp

static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
const Kernel::KThread& thread) {
const Kernel::KThread* thread) {
// Read thread type from TLS
const VAddr tls_thread_type{memory.Read32(thread.GetTlsAddress() + 0x1fc)};
const VAddr argument_thread_type{thread.GetArgument()};
const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)};
const VAddr argument_thread_type{thread->GetArgument()};

if (argument_thread_type && tls_thread_type != argument_thread_type) {
// Probably not created by nnsdk, no name available.
@@ -477,10 +477,10 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
}

static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
const Kernel::KThread& thread) {
const Kernel::KThread* thread) {
// Read thread type from TLS
const VAddr tls_thread_type{memory.Read64(thread.GetTlsAddress() + 0x1f8)};
const VAddr argument_thread_type{thread.GetArgument()};
const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)};
const VAddr argument_thread_type{thread->GetArgument()};

if (argument_thread_type && tls_thread_type != argument_thread_type) {
// Probably not created by nnsdk, no name available.
@@ -508,16 +508,16 @@ static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory&
}

static std::optional<std::string> GetThreadName(Core::System& system,
const Kernel::KThread& thread) {
if (system.ApplicationProcess()->Is64Bit()) {
const Kernel::KThread* thread) {
if (system.ApplicationProcess()->Is64BitProcess()) {
return GetNameFromThreadType64(system.ApplicationMemory(), thread);
} else {
return GetNameFromThreadType32(system.ApplicationMemory(), thread);
}
}

static std::string_view GetThreadWaitReason(const Kernel::KThread& thread) {
switch (thread.GetWaitReasonForDebugging()) {
static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
switch (thread->GetWaitReasonForDebugging()) {
case Kernel::ThreadWaitReasonForDebugging::Sleep:
return "Sleep";
case Kernel::ThreadWaitReasonForDebugging::IPC:
@@ -535,8 +535,8 @@ static std::string_view GetThreadWaitReason(const Kernel::KThread& thread) {
}
}

static std::string GetThreadState(const Kernel::KThread& thread) {
switch (thread.GetState()) {
static std::string GetThreadState(const Kernel::KThread* thread) {
switch (thread->GetState()) {
case Kernel::ThreadState::Initialized:
return "Initialized";
case Kernel::ThreadState::Waiting:
@@ -604,7 +604,7 @@ void GDBStub::HandleQuery(std::string_view command) {
const auto& threads = system.ApplicationProcess()->GetThreadList();
std::vector<std::string> thread_ids;
for (const auto& thread : threads) {
thread_ids.push_back(fmt::format("{:x}", thread.GetThreadId()));
thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId()));
}
SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
} else if (command.starts_with("sThreadInfo")) {
@@ -616,14 +616,14 @@ void GDBStub::HandleQuery(std::string_view command) {
buffer += "<threads>";

const auto& threads = system.ApplicationProcess()->GetThreadList();
for (const auto& thread : threads) {
for (const auto* thread : threads) {
auto thread_name{GetThreadName(system, thread)};
if (!thread_name) {
thread_name = fmt::format("Thread {:d}", thread.GetThreadId());
thread_name = fmt::format("Thread {:d}", thread->GetThreadId());
}

buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
thread.GetThreadId(), thread.GetActiveCore(),
thread->GetThreadId(), thread->GetActiveCore(),
EscapeXML(*thread_name), GetThreadState(thread));
}
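Note: for context, this loop appears to build the reply to GDB's qXfer:threads:read query; each entry renders as e.g. `<thread id="1a" core="0" name="MainThread">Initialized</thread>` inside the surrounding `<threads>` element, which is why the name goes through EscapeXML before being embedded.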
@@ -822,13 +822,11 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
const char p =
True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';

reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
mem_info.base_address,
mem_info.base_address + mem_info.size - 1, perm, state, l, i,
d, u, p, mem_info.ipc_count, mem_info.device_count);
reply +=
fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{} [{}, {}]\n",
mem_info.base_address, mem_info.base_address + mem_info.size - 1,
perm, state, l, i, d, u, mem_info.ipc_count, mem_info.device_count);
}

const uintptr_t next_address = mem_info.base_address + mem_info.size;
@@ -850,10 +848,10 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
}

Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
auto& threads{system.ApplicationProcess()->GetThreadList()};
for (auto& thread : threads) {
if (thread.GetThreadId() == thread_id) {
return std::addressof(thread);
const auto& threads{system.ApplicationProcess()->GetThreadList()};
for (auto* thread : threads) {
if (thread->GetThreadId() == thread_id) {
return thread;
}
}
@@ -107,56 +107,62 @@ static u64 romfs_get_hash_table_count(u64 num_entries) {

void RomFSBuildContext::VisitDirectory(VirtualDir romfs_dir, VirtualDir ext_dir,
std::shared_ptr<RomFSBuildDirectoryContext> parent) {
for (auto& child_romfs_file : romfs_dir->GetFiles()) {
const auto name = child_romfs_file->GetName();
const auto child = std::make_shared<RomFSBuildFileContext>();
// Set child's path.
child->cur_path_ofs = parent->path_len + 1;
child->path_len = child->cur_path_ofs + static_cast<u32>(name.size());
child->path = parent->path + "/" + name;
std::vector<std::shared_ptr<RomFSBuildDirectoryContext>> child_dirs;

if (ext_dir != nullptr && ext_dir->GetFile(name + ".stub") != nullptr) {
continue;
}
const auto entries = romfs_dir->GetEntries();

// Sanity check on path_len
ASSERT(child->path_len < FS_MAX_PATH);
for (const auto& kv : entries) {
if (kv.second == VfsEntryType::Directory) {
const auto child = std::make_shared<RomFSBuildDirectoryContext>();
// Set child's path.
child->cur_path_ofs = parent->path_len + 1;
child->path_len = child->cur_path_ofs + static_cast<u32>(kv.first.size());
child->path = parent->path + "/" + kv.first;

child->source = std::move(child_romfs_file);
if (ext_dir != nullptr && ext_dir->GetFile(kv.first + ".stub") != nullptr) {
continue;
}

if (ext_dir != nullptr) {
if (const auto ips = ext_dir->GetFile(name + ".ips")) {
if (auto patched = PatchIPS(child->source, ips)) {
child->source = std::move(patched);
// Sanity check on path_len
ASSERT(child->path_len < FS_MAX_PATH);

if (AddDirectory(parent, child)) {
child_dirs.push_back(child);
}
} else {
const auto child = std::make_shared<RomFSBuildFileContext>();
// Set child's path.
child->cur_path_ofs = parent->path_len + 1;
child->path_len = child->cur_path_ofs + static_cast<u32>(kv.first.size());
child->path = parent->path + "/" + kv.first;

if (ext_dir != nullptr && ext_dir->GetFile(kv.first + ".stub") != nullptr) {
continue;
}

// Sanity check on path_len
ASSERT(child->path_len < FS_MAX_PATH);

child->source = romfs_dir->GetFile(kv.first);

if (ext_dir != nullptr) {
if (const auto ips = ext_dir->GetFile(kv.first + ".ips")) {
if (auto patched = PatchIPS(child->source, ips)) {
child->source = std::move(patched);
}
}
}

child->size = child->source->GetSize();

AddFile(parent, child);
}

child->size = child->source->GetSize();

AddFile(parent, child);
}

for (auto& child_romfs_dir : romfs_dir->GetSubdirectories()) {
const auto name = child_romfs_dir->GetName();
const auto child = std::make_shared<RomFSBuildDirectoryContext>();
// Set child's path.
child->cur_path_ofs = parent->path_len + 1;
child->path_len = child->cur_path_ofs + static_cast<u32>(name.size());
child->path = parent->path + "/" + name;

if (ext_dir != nullptr && ext_dir->GetFile(name + ".stub") != nullptr) {
continue;
}

// Sanity check on path_len
ASSERT(child->path_len < FS_MAX_PATH);

if (!AddDirectory(parent, child)) {
continue;
}

auto child_ext_dir = ext_dir != nullptr ? ext_dir->GetSubdirectory(name) : nullptr;
for (auto& child : child_dirs) {
auto subdir_name = std::string_view(child->path).substr(child->cur_path_ofs);
auto child_romfs_dir = romfs_dir->GetSubdirectory(subdir_name);
auto child_ext_dir = ext_dir != nullptr ? ext_dir->GetSubdirectory(subdir_name) : nullptr;
this->VisitDirectory(child_romfs_dir, child_ext_dir, child);
}
}
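Note: the rewritten traversal above replaces the two passes over GetFiles() and GetSubdirectories() with a single pass over GetEntries(), which yields name/VfsEntryType pairs, and it defers recursion until every child of the current directory has been registered. Compressed to its control flow (patch handling elided), the new shape is:

```cpp
// Sketch of the single-pass layout; names follow the hunk above.
std::vector<std::shared_ptr<RomFSBuildDirectoryContext>> child_dirs;
for (const auto& [name, type] : romfs_dir->GetEntries()) {
    if (ext_dir != nullptr && ext_dir->GetFile(name + ".stub") != nullptr) {
        continue;  // a .stub file in the mod directory hides this entry
    }
    if (type == VfsEntryType::Directory) {
        // ...build a RomFSBuildDirectoryContext, remember it in child_dirs...
    } else {
        // ...build a RomFSBuildFileContext, applying an optional name + ".ips" patch...
    }
}
for (auto& child : child_dirs) {
    // ...recurse with the matching romfs/ext subdirectories...
}
```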
@@ -287,7 +293,7 @@ std::multimap<u64, VirtualFile> RomFSBuildContext::Build() {

cur_entry.name_size = name_size;

out.emplace(cur_file->offset + ROMFS_FILEPARTITION_OFS, std::move(cur_file->source));
out.emplace(cur_file->offset + ROMFS_FILEPARTITION_OFS, cur_file->source);
std::memcpy(file_table.data() + cur_file->entry_offset, &cur_entry, sizeof(RomFSFileEntry));
std::memset(file_table.data() + cur_file->entry_offset + sizeof(RomFSFileEntry), 0,
Common::AlignUp(cur_entry.name_size, 4));
@@ -377,16 +377,16 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t

auto romfs_dir = FindSubdirectoryCaseless(subdir, "romfs");
if (romfs_dir != nullptr)
layers.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(romfs_dir)));
layers.push_back(std::make_shared<CachedVfsDirectory>(romfs_dir));

auto ext_dir = FindSubdirectoryCaseless(subdir, "romfs_ext");
if (ext_dir != nullptr)
layers_ext.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(ext_dir)));
layers_ext.push_back(std::make_shared<CachedVfsDirectory>(ext_dir));

if (type == ContentRecordType::HtmlDocument) {
auto manual_dir = FindSubdirectoryCaseless(subdir, "manual_html");
if (manual_dir != nullptr)
layers.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(manual_dir)));
layers.push_back(std::make_shared<CachedVfsDirectory>(manual_dir));
}
}

@@ -400,7 +400,7 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
return;
}

layers.emplace_back(std::move(extracted));
layers.push_back(std::move(extracted));

auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
if (layered == nullptr) {
@@ -104,16 +104,16 @@ Loader::ResultStatus ProgramMetadata::Reload(VirtualFile file) {
}

/*static*/ ProgramMetadata ProgramMetadata::GetDefault() {
// Allow use of cores 0~3 and thread priorities 16~63.
constexpr u32 default_thread_info_capability = 0x30043F7;
// Allow use of cores 0~3 and thread priorities 1~63.
constexpr u32 default_thread_info_capability = 0x30007F7;

ProgramMetadata result;

result.LoadManual(
true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/,
0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x100000 /*main_thread_stack_size*/,
0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, 0 /*system_resource_size*/,
{default_thread_info_capability} /*capabilities*/);
0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x00100000 /*main_thread_stack_size*/,
0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/,
0x1FE00000 /*system_resource_size*/, {default_thread_info_capability} /*capabilities*/);

return result;
}
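Note: the capability constant is easiest to check by unpacking it. Assuming the standard Horizon thread-info capability layout, a 4-bit 0b0111 tag followed by two 6-bit priority bounds and two 8-bit core IDs (this layout is inferred from the comments, not from this diff), the old and new words decode to exactly the ranges the comments claim:

```cpp
#include <cstdint>

constexpr std::uint32_t kOldCap = 0x30043F7; // cores 0~3, priorities 16~63
constexpr std::uint32_t kNewCap = 0x30007F7; // cores 0~3, priorities 1~63

static_assert((kNewCap & 0xF) == 0b0111);      // thread-info capability tag
static_assert(((kNewCap >> 4) & 0x3F) == 63);  // lowest-priority bound (both words)
static_assert(((kOldCap >> 10) & 0x3F) == 16); // old highest-priority bound
static_assert(((kNewCap >> 10) & 0x3F) == 1);  // new highest-priority bound
static_assert(((kNewCap >> 16) & 0xFF) == 0);  // minimum core ID
static_assert(((kNewCap >> 24) & 0xFF) == 3);  // maximum core ID
```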
@@ -73,9 +73,6 @@ public:
u64 GetFilesystemPermissions() const;
u32 GetSystemResourceSize() const;
const KernelCapabilityDescriptors& GetKernelCapabilities() const;
const std::array<u8, 0x10>& GetName() const {
return npdm_header.application_name;
}

void Print() const;

@@ -167,14 +164,14 @@ private:
u32_le unk_size_2;
};

Header npdm_header{};
AciHeader aci_header{};
AcidHeader acid_header{};
Header npdm_header;
AciHeader aci_header;
AcidHeader acid_header;

FileAccessControl acid_file_access{};
FileAccessHeader aci_file_access{};
FileAccessControl acid_file_access;
FileAccessHeader aci_file_access;

KernelCapabilityDescriptors aci_kernel_capabilities{};
KernelCapabilityDescriptors aci_kernel_capabilities;
};

} // namespace FileSys
@@ -322,8 +322,7 @@ VirtualFile RegisteredCache::OpenFileOrDirectoryConcat(const VirtualDir& open_di
return nullptr;
}

auto name = concat.front()->GetName();
return ConcatenatedVfsFile::MakeConcatenatedFile(std::move(name), std::move(concat));
return ConcatenatedVfsFile::MakeConcatenatedFile(concat, concat.front()->GetName());
}

VirtualFile RegisteredCache::GetFileAtID(NcaID id) const {
@@ -133,7 +133,7 @@ VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) {
out = out->GetSubdirectories().front();
}

return std::make_shared<CachedVfsDirectory>(std::move(out));
return std::make_shared<CachedVfsDirectory>(out);
}

VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
@@ -141,7 +141,8 @@ VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
return nullptr;

RomFSBuildContext ctx{dir, ext};
return ConcatenatedVfsFile::MakeConcatenatedFile(0, dir->GetName(), ctx.Build());
auto file_map = ctx.Build();
return ConcatenatedVfsFile::MakeConcatenatedFile(0, file_map, dir->GetName());
}

} // namespace FileSys
@@ -1,7 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/logging/log.h"
#include "core/file_sys/system_archive/system_version.h"
#include "core/file_sys/vfs_vector.h"
#include "core/hle/api_version.h"
@@ -13,9 +12,6 @@ std::string GetLongDisplayVersion() {
}

VirtualDir SystemVersion() {
LOG_WARNING(Common_Filesystem, "called - Using hardcoded firmware version '{}'",
GetLongDisplayVersion());

VirtualFile file = std::make_shared<VectorVfsFile>(std::vector<u8>(0x100), "file");
file->WriteObject(HLE::ApiVersion::HOS_VERSION_MAJOR, 0);
file->WriteObject(HLE::ApiVersion::HOS_VERSION_MINOR, 1);
@@ -6,13 +6,13 @@

namespace FileSys {

CachedVfsDirectory::CachedVfsDirectory(VirtualDir&& source_dir)
CachedVfsDirectory::CachedVfsDirectory(VirtualDir& source_dir)
: name(source_dir->GetName()), parent(source_dir->GetParentDirectory()) {
for (auto& dir : source_dir->GetSubdirectories()) {
dirs.emplace(dir->GetName(), std::make_shared<CachedVfsDirectory>(std::move(dir)));
dirs.emplace(dir->GetName(), std::make_shared<CachedVfsDirectory>(dir));
}
for (auto& file : source_dir->GetFiles()) {
files.emplace(file->GetName(), std::move(file));
files.emplace(file->GetName(), file);
}
}
@@ -11,7 +11,7 @@ namespace FileSys {

class CachedVfsDirectory : public ReadOnlyVfsDirectory {
public:
CachedVfsDirectory(VirtualDir&& source_directory);
CachedVfsDirectory(VirtualDir& source_directory);

~CachedVfsDirectory() override;
VirtualFile GetFile(std::string_view file_name) const override;
@@ -10,7 +10,7 @@

namespace FileSys {

ConcatenatedVfsFile::ConcatenatedVfsFile(std::string&& name_, ConcatenationMap&& concatenation_map_)
ConcatenatedVfsFile::ConcatenatedVfsFile(ConcatenationMap&& concatenation_map_, std::string&& name_)
: concatenation_map(std::move(concatenation_map_)), name(std::move(name_)) {
DEBUG_ASSERT(this->VerifyContinuity());
}
@@ -30,8 +30,8 @@ bool ConcatenatedVfsFile::VerifyContinuity() const {

ConcatenatedVfsFile::~ConcatenatedVfsFile() = default;

VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(std::string&& name,
std::vector<VirtualFile>&& files) {
VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(const std::vector<VirtualFile>& files,
std::string&& name) {
// Fold trivial cases.
if (files.empty()) {
return nullptr;
@@ -46,21 +46,20 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(std::string&& name,
u64 last_offset = 0;

for (auto& file : files) {
const auto size = file->GetSize();

concatenation_map.emplace_back(ConcatenationEntry{
.offset = last_offset,
.file = std::move(file),
.file = file,
});

last_offset += size;
last_offset += file->GetSize();
}

return VirtualFile(new ConcatenatedVfsFile(std::move(name), std::move(concatenation_map)));
return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name)));
}

VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte, std::string&& name,
std::multimap<u64, VirtualFile>&& files) {
VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
const std::multimap<u64, VirtualFile>& files,
std::string&& name) {
// Fold trivial cases.
if (files.empty()) {
return nullptr;
@@ -77,8 +76,6 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte, std::strin

// Iteration of a multimap is ordered, so offset will be strictly non-decreasing.
for (auto& [offset, file] : files) {
const auto size = file->GetSize();

if (offset > last_offset) {
concatenation_map.emplace_back(ConcatenationEntry{
.offset = last_offset,
@@ -88,13 +85,13 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte, std::strin

concatenation_map.emplace_back(ConcatenationEntry{
.offset = offset,
.file = std::move(file),
.file = file,
});

last_offset = offset + size;
last_offset = offset + file->GetSize();
}

return VirtualFile(new ConcatenatedVfsFile(std::move(name), std::move(concatenation_map)));
return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name)));
}

std::string ConcatenatedVfsFile::GetName() const {
@@ -24,20 +24,22 @@ private:
};
using ConcatenationMap = std::vector<ConcatenationEntry>;

explicit ConcatenatedVfsFile(std::string&& name,
std::vector<ConcatenationEntry>&& concatenation_map);
explicit ConcatenatedVfsFile(std::vector<ConcatenationEntry>&& concatenation_map,
std::string&& name);
bool VerifyContinuity() const;

public:
~ConcatenatedVfsFile() override;

/// Wrapper function to allow for more efficient handling of files.size() == 0, 1 cases.
static VirtualFile MakeConcatenatedFile(std::string&& name, std::vector<VirtualFile>&& files);
static VirtualFile MakeConcatenatedFile(const std::vector<VirtualFile>& files,
std::string&& name);

/// Convenience function that turns a map of offsets to files into a concatenated file, filling
/// gaps with a given filler byte.
static VirtualFile MakeConcatenatedFile(u8 filler_byte, std::string&& name,
std::multimap<u64, VirtualFile>&& files);
static VirtualFile MakeConcatenatedFile(u8 filler_byte,
const std::multimap<u64, VirtualFile>& files,
std::string&& name);

std::string GetName() const override;
std::size_t GetSize() const override;
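Note: both factory overloads now take their inputs first and the name last, and (per the .cpp hunk above) return nullptr when given no files. A hypothetical pair of call sites against the new signatures; the header path and file names are illustrative:

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <vector>

#include "core/file_sys/vfs_concat.h"  // assumed header location

using u64 = std::uint64_t;  // yuzu's common alias

FileSys::VirtualFile Join(const std::vector<FileSys::VirtualFile>& parts) {
    // Back-to-back concatenation; returns nullptr if parts is empty.
    return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(parts, "joined.bin");
}

FileSys::VirtualFile JoinSparse(const std::multimap<u64, FileSys::VirtualFile>& parts) {
    // Offset-keyed concatenation; gaps between entries read as the filler byte 0.
    return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(0, parts, "sparse.bin");
}
```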
@@ -38,7 +38,7 @@ VirtualDir LayeredVfsDirectory::GetDirectoryRelative(std::string_view path) cons
for (const auto& layer : dirs) {
auto dir = layer->GetDirectoryRelative(path);
if (dir != nullptr) {
out.emplace_back(std::move(dir));
out.push_back(std::move(dir));
}
}

@@ -62,11 +62,11 @@ std::vector<VirtualFile> LayeredVfsDirectory::GetFiles() const {
std::set<std::string, std::less<>> out_names;

for (const auto& layer : dirs) {
for (auto& file : layer->GetFiles()) {
for (const auto& file : layer->GetFiles()) {
auto file_name = file->GetName();
if (!out_names.contains(file_name)) {
out_names.emplace(std::move(file_name));
out.emplace_back(std::move(file));
out.push_back(file);
}
}
}
@@ -86,7 +86,7 @@ std::vector<VirtualDir> LayeredVfsDirectory::GetSubdirectories() const {
std::vector<VirtualDir> out;
out.reserve(names.size());
for (const auto& subdir : names)
out.emplace_back(GetSubdirectory(subdir));
out.push_back(GetSubdirectory(subdir));

return out;
}
@@ -8,11 +8,7 @@

#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_trace.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel::Board::Nintendo::Nx {

@@ -34,8 +30,6 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize =
constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal =
RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal;

constexpr const std::size_t SecureAlignment = 128_KiB;

namespace {

using namespace Common::Literals;
@@ -189,57 +183,4 @@ u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
return GenerateUniformRange(min, max, GenerateRandomU64);
}

size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
if (pool == static_cast<u32>(KMemoryManager::Pool::Applet)) {
return 0;
} else {
// return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
return size;
}
}

Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
u32 pool) {
// Applet secure memory is handled separately.
UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));

// Ensure the size is aligned.
const size_t alignment =
(pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
R_UNLESS(Common::IsAligned(size, alignment), ResultInvalidSize);

// Allocate the memory.
const size_t num_pages = size / PageSize;
const KPhysicalAddress paddr = kernel.MemoryManager().AllocateAndOpenContinuous(
num_pages, alignment / PageSize,
KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool),
KMemoryManager::Direction::FromFront));
R_UNLESS(paddr != 0, ResultOutOfMemory);

// Ensure we don't leak references to the memory on error.
ON_RESULT_FAILURE {
kernel.MemoryManager().Close(paddr, num_pages);
};

// We succeeded.
*out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
R_SUCCEED();
}

void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
u32 pool) {
// Applet secure memory is handled separately.
UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));

// Ensure the size is aligned.
const size_t alignment =
(pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
ASSERT(Common::IsAligned(GetInteger(address), alignment));
ASSERT(Common::IsAligned(size, alignment));

// Close the secure region's pages.
kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
size / PageSize);
}

} // namespace Kernel::Board::Nintendo::Nx
@@ -4,11 +4,6 @@

#pragma once

#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/result.h"

namespace Kernel {
class KernelCore;
}

namespace Kernel::Board::Nintendo::Nx {

@@ -30,16 +25,8 @@ public:
static std::size_t GetMinimumNonSecureSystemPoolSize();
};

// Randomness.
static u64 GenerateRandomRange(u64 min, u64 max);
static u64 GenerateRandomU64();

// Secure Memory.
static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
static Result AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
u32 pool);
static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
u32 pool);
};

} // namespace Kernel::Board::Nintendo::Nx
@@ -106,7 +106,7 @@ static_assert(KernelPageBufferAdditionalSize ==
/// memory.
static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
KVirtualAddress slab_addr) {
slab_addr -= memory_layout.GetSlabRegion().GetAddress();
slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
}

@@ -196,12 +196,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();

// Get the start of the slab region, since that's where we'll be working.
const KMemoryRegion& slab_region = memory_layout.GetSlabRegion();
KVirtualAddress address = slab_region.GetAddress();

// Clear the slab region.
// TODO: implement access to kernel VAs.
// std::memset(device_ptr, 0, slab_region.GetSize());
KVirtualAddress address = memory_layout.GetSlabRegionAddress();

// Initialize slab type array to be in sorted order.
std::array<KSlabType, KSlabType_Count> slab_types;

@@ -19,8 +19,4 @@ static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
MainMemoryAddress);
}

static inline size_t GetInitialProcessBinarySize() {
return InitialProcessBinarySizeMax;
}

} // namespace Kernel
@@ -200,8 +200,8 @@ private:

RawCapabilityValue raw;
BitField<0, 15, CapabilityType> id;
BitField<15, 4, u32> minor_version;
BitField<19, 13, u32> major_version;
BitField<15, 4, u32> major_version;
BitField<19, 13, u32> minor_version;
};

union HandleTable {
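Note: the fix swaps the two version fields so that the 4-bit field at bit 15 is the major version and the 13-bit field at bit 19 is the minor version. In plain shift-and-mask form, the corrected layout reads:

```cpp
#include <cstdint>

// Bits 0-14: capability id; bits 15-18: major version; bits 19-31: minor version.
constexpr std::uint32_t GetMajorVersion(std::uint32_t raw) {
    return (raw >> 15) & 0xF;    // 4-bit field
}
constexpr std::uint32_t GetMinorVersion(std::uint32_t raw) {
    return (raw >> 19) & 0x1FFF; // 13-bit field
}
```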
@@ -107,12 +107,12 @@ KConditionVariable::KConditionVariable(Core::System& system)

KConditionVariable::~KConditionVariable() = default;

Result KConditionVariable::SignalToAddress(KernelCore& kernel, KProcessAddress addr) {
KThread* owner_thread = GetCurrentThreadPointer(kernel);
Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
KThread* owner_thread = GetCurrentThreadPointer(m_kernel);

// Signal the address.
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Remove waiter thread.
bool has_waiters{};
@@ -133,7 +133,7 @@ Result KConditionVariable::SignalToAddress(KernelCore& kernel, KProcessAddress a

// Write the value to userspace.
Result result{ResultSuccess};
if (WriteToUser(kernel, addr, std::addressof(next_value))) [[likely]] {
if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
result = ResultInvalidCurrentMemory;
@@ -148,28 +148,28 @@ Result KConditionVariable::SignalToAddress(KernelCore& kernel, KProcessAddress a
}
}

Result KConditionVariable::WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);

// Wait for the address.
KThread* owner_thread{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);

// Read the tag from userspace.
u32 test_tag{};
R_UNLESS(ReadFromUser(kernel, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr),
ResultInvalidCurrentMemory);

// If the tag isn't the handle (with wait mask), we're done.
R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));

// Get the lock owner thread.
owner_thread = GetCurrentProcess(kernel)
owner_thread = GetCurrentProcess(m_kernel)
.GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(handle)
.ReleasePointerUnsafe();
@@ -24,12 +24,11 @@ public:
explicit KConditionVariable(Core::System& system);
~KConditionVariable();

// Arbitration.
static Result SignalToAddress(KernelCore& kernel, KProcessAddress addr);
static Result WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
u32 value);
// Arbitration
Result SignalToAddress(KProcessAddress addr);
Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);

// Condition variable.
// Condition variable
void Signal(u64 cv_key, s32 count);
Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);
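Note: with this header change, both arbitration entry points stop being static functions that take a KernelCore& and become member functions that use the m_kernel member (mirrored in the .cpp hunk above), so a hypothetical call site changes shape accordingly:

```cpp
// Hypothetical call site; cv, handle, addr, and value are assumed to exist.
void SignalAndWait(Kernel::KConditionVariable& cv, Handle handle,
                   KProcessAddress addr, u32 value) {
    // Before: KConditionVariable::SignalToAddress(kernel, addr);
    Result signal_result = cv.SignalToAddress(addr);
    Result wait_result = cv.WaitForAddress(handle, addr, value);
}
```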
@@ -22,7 +22,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
KScopedSchedulerLock sl{kernel};

// Pin the current thread.
process->PinCurrentThread();
process->PinCurrentThread(core_id);

// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
@@ -36,7 +36,6 @@ enum class KMemoryState : u32 {
FlagCanChangeAttribute = (1 << 24),
FlagCanCodeMemory = (1 << 25),
FlagLinearMapped = (1 << 26),
FlagCanPermissionLock = (1 << 27),

FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
@@ -51,16 +50,12 @@ enum class KMemoryState : u32 {
FlagLinearMapped,

Free = static_cast<u32>(Svc::MemoryState::Free),

IoMemory = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
IoRegister =
static_cast<u32>(Svc::MemoryState::Io) | FlagCanDeviceMap | FlagCanAlignedDeviceMap,

Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
FlagCanCodeMemory | FlagCanPermissionLock,
FlagCanCodeMemory,
Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,
@@ -70,8 +65,7 @@ enum class KMemoryState : u32 {
AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
FlagCanCodeAlias,
AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory |
FlagCanPermissionLock,
FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,

Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -79,7 +73,7 @@ enum class KMemoryState : u32 {
Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagLinearMapped,
ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,

Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
@@ -100,7 +94,7 @@ enum class KMemoryState : u32 {
NonDeviceIpc =
static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,

Kernel = static_cast<u32>(Svc::MemoryState::Kernel),
Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,

GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
@@ -111,36 +105,34 @@ enum class KMemoryState : u32 {

Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagCanQueryPhysical | FlagCanUseNonSecureIpc |
FlagCanUseNonDeviceIpc,
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);

static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(KMemoryState::IoMemory) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::IoRegister) == 0x00180001);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x0FFEBD04);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);

static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x0FFFBD09);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400000C);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00000013);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x055C3817);
static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
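Note: these constants can be re-derived from the flag bits in this header: FlagLinearMapped is declared above as 1 << 26, and the updated Kernel assertion (0x13 | FlagMapped == 0x00002013) pins FlagMapped at 1 << 13. The new ThreadLocal value, for example, checks out as:

```cpp
constexpr unsigned int FlagMapped = 1u << 13;       // derived from the Kernel assert
constexpr unsigned int FlagLinearMapped = 1u << 26; // declared in this header
constexpr unsigned int SvcThreadLocal = 0x0C;       // Svc::MemoryState::ThreadLocal
static_assert((SvcThreadLocal | FlagMapped | FlagLinearMapped) == 0x0400200C);
```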
enum class KMemoryPermission : u8 {
None = 0,
@@ -190,9 +182,8 @@ enum class KMemoryAttribute : u8 {
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
PermissionLocked = static_cast<u8>(Svc::MemoryAttribute::PermissionLocked),

SetMask = Uncached | PermissionLocked,
SetMask = Uncached,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);

@@ -270,10 +261,6 @@ struct KMemoryInfo {
return m_state;
}

constexpr Svc::MemoryState GetSvcState() const {
return static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask);
}

constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
@@ -339,10 +326,6 @@ public:
return this->GetEndAddress() - 1;
}

constexpr KMemoryState GetState() const {
return m_memory_state;
}

constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
@@ -460,13 +443,6 @@ public:
}
}

constexpr void UpdateAttribute(KMemoryAttribute mask, KMemoryAttribute attr) {
ASSERT(False(mask & KMemoryAttribute::IpcLocked));
ASSERT(False(mask & KMemoryAttribute::DeviceShared));

m_attribute = (m_attribute & ~mask) | attr;
}

constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));
@@ -160,8 +160,8 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
}

// Update block state.
it->Update(state, perm, attr, it->GetAddress() == address,
static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
static_cast<u8>(clear_disable_attr));
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
}
@@ -175,9 +175,7 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
KProcessAddress address, size_t num_pages,
KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr) {
KMemoryPermission perm, KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
@@ -216,8 +214,7 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
}

// Update block state.
it->Update(state, perm, attr, false, static_cast<u8>(set_disable_attr),
static_cast<u8>(clear_disable_attr));
it->Update(state, perm, attr, false, 0, 0);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
@@ -287,65 +284,6 @@ void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocat
this->CoalesceForUpdate(allocator, address, num_pages);
}

void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator,
KProcessAddress address, size_t num_pages,
KMemoryAttribute mask, KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(GetInteger(address), PageSize));

KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);

while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();

if ((it->GetAttribute() & mask) != attr) {
// If we need to, create a new block before and insert it.
if (cur_info.GetAddress() != GetInteger(cur_address)) {
KMemoryBlock* new_block = allocator->Allocate();

it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;

cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}

// If we need to, create a new block after and insert it.
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock* new_block = allocator->Allocate();

it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);

cur_info = it->GetMemoryInfo();
}

// Update block state.
it->UpdateAttribute(mask, attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
// If we already have the right attributes, just advance.
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages =
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
}
it++;
}

this->CoalesceForUpdate(allocator, address, num_pages);
}
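The UpdateAttribute implementation in this hunk follows a standard interval-store pattern: split the boundary blocks so the target span is tiled exactly, update the now fully covered blocks, then re-coalesce. A self-contained sketch of that pattern over a std::map (not yuzu's allocator-backed intrusive tree; it assumes the map is non-empty and fully tiles the address space):

```cpp
#include <cstdint>
#include <iterator>
#include <map>

// Each entry covers [start, start + size) with one attribute value.
struct Range {
    uint64_t size;
    uint32_t attr;
};

using RangeMap = std::map<uint64_t, Range>; // key = start address

// Apply `attr` to [addr, addr + len), splitting boundary ranges first so
// that every stored range is either fully inside or fully outside the span.
void UpdateAttribute(RangeMap& map, uint64_t addr, uint64_t len, uint32_t attr) {
    const uint64_t end = addr + len;
    auto it = std::prev(map.upper_bound(addr)); // range containing addr

    // Split before: carve off the prefix that keeps its old attribute.
    if (it->first < addr) {
        Range tail{it->first + it->second.size - addr, it->second.attr};
        it->second.size = addr - it->first;
        it = map.emplace(addr, tail).first;
    }
    while (it != map.end() && it->first < end) {
        // Split after: carve off the suffix past the span.
        if (it->first + it->second.size > end) {
            Range tail{it->first + it->second.size - end, it->second.attr};
            it->second.size = end - it->first;
            map.emplace(end, tail);
        }
        it->second.attr = attr; // update the fully covered range
        ++it;
    }
    // A real implementation would re-coalesce equal neighbors here, as
    // CoalesceForUpdate does in the code above.
}
```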

// Debug.
bool KMemoryBlockManager::CheckState() const {
// Loop over every block, ensuring that we are sorted and coalesced.

@@ -115,11 +115,7 @@ public:
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr);

void UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryAttribute mask, KMemoryAttribute attr);
KMemoryAttribute attr);

iterator FindIterator(KProcessAddress address) const {
return m_memory_block_tree.find(KMemoryBlock(

@@ -137,9 +137,11 @@ public:
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
}

const KMemoryRegion& GetSlabRegion() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab));
KVirtualAddress GetSlabRegionAddress() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
.GetAddress();
}

const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
}

@@ -11,7 +11,6 @@
#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"

@@ -120,8 +119,7 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
// Free each region to its corresponding heap.
size_t reserved_sizes[MaxManagerCount] = {};
const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
const size_t ini_size = GetInitialProcessBinarySize();
const KPhysicalAddress ini_end = ini_start + ini_size;
const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
const KPhysicalAddress ini_last = ini_end - 1;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@@ -139,13 +137,13 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
}

// Open/reserve the ini memory.
manager.OpenFirst(ini_start, ini_size / PageSize);
reserved_sizes[it.GetAttributes()] += ini_size;
manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;

// Free memory after the ini to the heap.
if (ini_last != cur_last) {
ASSERT(cur_end != 0);
manager.Free(ini_end, (cur_end - ini_end) / PageSize);
manager.Free(ini_end, cur_end - ini_end);
}
} else {
// Ensure there's no partial overlap with the ini image.
@@ -169,37 +167,11 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
}

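Note the unit difference in the two Free calls above: one side divides the byte span by PageSize, the other passes it through unchanged, so the two sides of the hunk disagree about whether Free takes pages or bytes. The conversion itself is just:

```cpp
#include <cstddef>

constexpr std::size_t PageSize = 0x1000; // 4 KiB pages, as on Switch

// Convert a byte span to a page count; the two sides of the hunk above
// differ by exactly this division, so one of them treats bytes as pages.
constexpr std::size_t BytesToPages(std::size_t bytes) {
    return bytes / PageSize;
}

static_assert(BytesToPages(0x8000) == 8); // 32 KiB -> 8 pages
```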
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
const u32 pool_index = static_cast<u32>(pool);

// Lock the pool.
KScopedLightLock lk(m_pool_locks[pool_index]);

// Check that we don't already have an optimized process.
R_UNLESS(!m_has_optimized_process[pool_index], ResultBusy);

// Set the optimized process id.
m_optimized_process_ids[pool_index] = process_id;
m_has_optimized_process[pool_index] = true;

// Clear the management area for the optimized process.
for (auto* manager = this->GetFirstManager(pool, Direction::FromFront); manager != nullptr;
manager = this->GetNextManager(manager, Direction::FromFront)) {
manager->InitializeOptimizedMemory(m_system.Kernel());
}

R_SUCCEED();
UNREACHABLE();
}

void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
const u32 pool_index = static_cast<u32>(pool);

// Lock the pool.
KScopedLightLock lk(m_pool_locks[pool_index]);

// If the process was optimized, clear it.
if (m_has_optimized_process[pool_index] && m_optimized_process_ids[pool_index] == process_id) {
m_has_optimized_process[pool_index] = false;
}
UNREACHABLE();
}

KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
@@ -234,7 +206,7 @@ KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, siz

// Maintain the optimized memory bitmap, if we should.
if (m_has_optimized_process[static_cast<size_t>(pool)]) {
chosen_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block, num_pages);
UNIMPLEMENTED();
}

// Open the first reference to the pages.
@@ -282,8 +254,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,

// Maintain the optimized memory bitmap, if we should.
if (unoptimized) {
cur_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block,
pages_per_alloc);
UNIMPLEMENTED();
}

num_pages -= pages_per_alloc;
@@ -386,8 +357,8 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Process part or all of the block.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
any_new = manager.ProcessOptimizedAllocation(m_system.Kernel(), cur_address,
cur_pages, fill_pattern);
any_new =
manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);

// Advance.
cur_address += cur_pages * PageSize;
@@ -410,7 +381,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Track some or all of the current pages.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
manager.TrackOptimizedAllocation(m_system.Kernel(), cur_address, cur_pages);
manager.TrackOptimizedAllocation(cur_address, cur_pages);

// Advance.
cur_address += cur_pages * PageSize;
@@ -455,86 +426,17 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
return total_management_size;
}

void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
auto optimize_pa =
KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);

std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
UNREACHABLE();
}

void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages) {
auto optimize_pa =
KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);

// Get the range we're tracking.
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;

// Track.
while (offset <= last) {
// Mark the page as not being optimized-allocated.
optimize_map[offset / Common::BitSize<u64>()] &=
~(u64(1) << (offset % Common::BitSize<u64>()));

offset++;
}
void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
UNREACHABLE();
}

void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages) {
auto optimize_pa =
KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);

// Get the range we're tracking.
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;

// Track.
while (offset <= last) {
// Mark the page as being optimized-allocated.
optimize_map[offset / Common::BitSize<u64>()] |=
(u64(1) << (offset % Common::BitSize<u64>()));

offset++;
}
}

bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages, u8 fill_pattern) {
auto& device_memory = kernel.System().DeviceMemory();
auto optimize_pa =
KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);

// We want to return whether any pages were newly allocated.
bool any_new = false;

// Get the range we're processing.
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;

// Process.
while (offset <= last) {
// Check if the page has been optimized-allocated before.
if ((optimize_map[offset / Common::BitSize<u64>()] &
(u64(1) << (offset % Common::BitSize<u64>()))) == 0) {
// If not, it's new.
any_new = true;

// Fill the page.
auto* ptr = device_memory.GetPointer<u8>(m_heap.GetAddress());
std::memset(ptr + offset * PageSize, fill_pattern, PageSize);
}

offset++;
}

// Return the number of pages we processed.
return any_new;
bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
u8 fill_pattern) {
UNREACHABLE();
}

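The Track and Process functions above all index a flat u64 bitmap the same way: word = page / 64, bit = page % 64, with one bit per page meaning "optimized-allocated". A minimal standalone equivalent of that indexing (PageBitmap is illustrative, not a yuzu type):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// One bit per page; bit set = "optimized-allocated", mirroring the
// optimize_map indexing above (word = page / 64, bit = page % 64).
struct PageBitmap {
    std::vector<uint64_t> words;

    explicit PageBitmap(std::size_t num_pages) : words((num_pages + 63) / 64) {}

    void Set(std::size_t page) { words[page / 64] |= (uint64_t{1} << (page % 64)); }
    void Clear(std::size_t page) { words[page / 64] &= ~(uint64_t{1} << (page % 64)); }
    bool Test(std::size_t page) const {
        return (words[page / 64] & (uint64_t{1} << (page % 64))) != 0;
    }
};
```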
size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {

@@ -216,14 +216,14 @@ private:
m_heap.SetInitialUsedSize(reserved_size);
}

void InitializeOptimizedMemory(KernelCore& kernel);
void InitializeOptimizedMemory() {
UNIMPLEMENTED();
}

void TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages);
void TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages);
void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);

bool ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages, u8 fill_pattern);
bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);

constexpr Pool GetPool() const {
return m_pool;

@@ -190,15 +190,9 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
KMemoryRegionAttr_LinearMapped);
constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown =
KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(
KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
(0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));
static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() ==
(0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));

constexpr inline auto KMemoryRegionType_DramReservedEarly =
KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
@@ -223,18 +217,16 @@ constexpr inline auto KMemoryRegionType_DramPoolPartition =
static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
(0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));

// UNUSED: .Derive(4, 1);
// UNUSED: .Derive(4, 2);
constexpr inline const auto KMemoryRegionType_DramPoolManagement =
KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(
constexpr inline auto KMemoryRegionType_DramPoolManagement =
KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
KMemoryRegionAttr_CarveoutProtected);
constexpr inline const auto KMemoryRegionType_DramUserPool =
KMemoryRegionType_DramPoolPartition.Derive(4, 3);
constexpr inline auto KMemoryRegionType_DramUserPool =
KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
(0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
(0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
(0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
(0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));

constexpr inline auto KMemoryRegionType_DramApplicationPool =
KMemoryRegionType_DramUserPool.Derive(4, 0);
@@ -245,63 +237,60 @@ constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
constexpr inline auto KMemoryRegionType_DramSystemPool =
KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
(0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
(0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
(0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
(0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
(0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
(0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
(0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
(0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_CarveoutProtected));

constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);

// UNUSED: .Derive(4, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown =
KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x32));
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown.GetValue() == (0x92));
// UNUSED: .DeriveSparse(2, 2, 0);
constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));

// UNUSED: .Derive(4, 3);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt =
KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement =
KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramUserPool =
KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x31A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x61A);
constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));

constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);

// NOTE: For unknown reason, the pools are derived out-of-order here.
// It's worth eventually trying to understand why Nintendo made this choice.
// UNUSED: .Derive(6, 0);
// UNUSED: .Derive(6, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool =
KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool =
KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool =
KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x361A);
static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x561A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x961A);
constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);

constexpr inline auto KMemoryRegionType_ArchDeviceBase =
KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
@@ -365,14 +354,12 @@ constexpr inline auto KMemoryRegionType_KernelTemp =
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);

constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
} else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
} else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureUnknown;
} else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
return KMemoryRegionType_VirtualDramUnknownDebug;
} else {

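The dispatch above hinges on IsAncestorOf over numeric region-type IDs. A rough model that is consistent with the static_assert values in this file, though the real KMemoryRegionType encoding is more involved, is a containment test on bit patterns: a derived type keeps every bit of its ancestor and adds distinguishing bits on top.

```cpp
#include <cstdint>

// Rough model only; the real KMemoryRegionType derivation is more involved.
// A derived type ID contains all bits of its ancestor's ID, so ancestry
// reduces to a mask test.
constexpr bool IsAncestorOf(uint32_t ancestor, uint32_t descendant) {
    return (descendant & ancestor) == ancestor;
}

static_assert(IsAncestorOf(0x1A, 0x31A));  // VirtualDramHeapBase -> a Derive'd child (values above)
static_assert(!IsAncestorOf(0x31A, 0x1A)); // ancestry is not symmetric
```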
@@ -183,17 +183,12 @@ private:

class KScopedPageGroup {
public:
explicit KScopedPageGroup(const KPageGroup* gp, bool not_first = true) : m_pg(gp) {
explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
if (m_pg) {
if (not_first) {
m_pg->Open();
} else {
m_pg->OpenFirst();
}
m_pg->Open();
}
}
explicit KScopedPageGroup(const KPageGroup& gp, bool not_first = true)
: KScopedPageGroup(std::addressof(gp), not_first) {}
explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
~KScopedPageGroup() {
if (m_pg) {
m_pg->Close();

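KScopedPageGroup is a plain RAII guard: one reference on the page group is taken at construction and released at destruction, so early returns cannot leak a reference. The android-11 side additionally accepts a not_first flag so the guard can take the very first reference via OpenFirst. A toy standalone version of the simpler behavior (PageGroup here is a stand-in, not the real class):

```cpp
#include <cassert>

// Toy reference-counted resource standing in for KPageGroup.
struct PageGroup {
    mutable int refs = 0;
    void Open() const { ++refs; }
    void Close() const { assert(refs > 0); --refs; }
};

// Scope guard in the spirit of KScopedPageGroup: take a reference on
// construction, drop it on destruction, even on early return or error.
class ScopedPageGroup {
public:
    explicit ScopedPageGroup(const PageGroup* pg) : m_pg(pg) {
        if (m_pg) {
            m_pg->Open();
        }
    }
    ~ScopedPageGroup() {
        if (m_pg) {
            m_pg->Close();
        }
    }
    ScopedPageGroup(const ScopedPageGroup&) = delete;
    ScopedPageGroup& operator=(const ScopedPageGroup&) = delete;

private:
    const PageGroup* m_pg;
};
```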
@@ -82,14 +82,14 @@ public:

using namespace Common::Literals;

constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) {
constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
switch (as_type) {
case Svc::CreateProcessFlag::AddressSpace32Bit:
case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
return 32;
case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
case FileSys::ProgramAddressSpaceType::Is36Bit:
return 36;
case Svc::CreateProcessFlag::AddressSpace64Bit:
case FileSys::ProgramAddressSpaceType::Is39Bit:
return 39;
default:
ASSERT(false);
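The widths returned above are counts of significant virtual-address bits, so the corresponding address-space sizes follow directly. A small illustrative helper (AddressSpaceSize is hypothetical, not part of the codebase):

```cpp
#include <cstddef>
#include <cstdint>

// The width is the number of significant VA bits, so the usable
// address-space size is 1 << width.
constexpr uint64_t AddressSpaceSize(std::size_t width_bits) {
    return uint64_t{1} << width_bits;
}

static_assert(AddressSpaceSize(32) == 0x1'0000'0000);  // 4 GiB
static_assert(AddressSpaceSize(36) == 0x10'0000'0000); // 64 GiB
static_assert(AddressSpaceSize(39) == 0x80'0000'0000); // 512 GiB
```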
@@ -105,7 +105,7 @@ KPageTable::KPageTable(Core::System& system_)

KPageTable::~KPageTable() = default;

Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
bool enable_das_merge, bool from_back,
KMemoryManager::Pool pool, KProcessAddress code_addr,
size_t code_size, KSystemResource* system_resource,
@@ -133,7 +133,7 @@ Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool ena
ASSERT(code_addr + code_size - 1 <= end - 1);

// Adjust heap/alias size if we don't have an alias region
if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
heap_region_size += alias_region_size;
alias_region_size = 0;
}
@@ -505,7 +505,7 @@ Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress
R_TRY(this->CheckMemoryStateContiguous(
std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
KMemoryAttribute::All, KMemoryAttribute::None));

// Determine whether any pages being unmapped are code.
bool any_code_pages = false;
@@ -1724,43 +1724,29 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
PageSize;

// While we have pages to map, map them.
{
// Create a page group for the current mapping range.
KPageGroup cur_pg(m_kernel, m_block_info_manager);
{
ON_RESULT_FAILURE_2 {
cur_pg.OpenFirst();
cur_pg.Close();
};
while (map_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != pg.end());

size_t remain_pages = map_pages;
while (remain_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != pg.end());

// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}

// Add whatever we can to the current block.
const size_t cur_pages = std::min(pg_pages, remain_pages);
R_TRY(cur_pg.AddBlock(pg_phys_addr +
((pg_pages - cur_pages) * PageSize),
cur_pages));

// Advance.
remain_pages -= cur_pages;
pg_pages -= cur_pages;
}
// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}

// Map the pages.
R_TRY(this->Operate(cur_address, map_pages, cur_pg,
OperationType::MapFirstGroup));
// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
OperationType::MapFirst, pg_phys_addr));

// Advance.
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;

pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
}

@@ -1784,11 +1770,7 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
m_memory_block_manager.UpdateIfMatch(
std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
address == this->GetAliasRegionStart()
? KMemoryBlockDisableMergeAttribute::Normal
: KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::None);
KMemoryPermission::UserReadWrite, KMemoryAttribute::None);

R_SUCCEED();
}
@@ -1886,13 +1868,6 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {

// Iterate over the memory, unmapping as we go.
auto it = m_memory_block_manager.FindIterator(cur_address);

const auto clear_merge_attr =
(it->GetState() == KMemoryState::Normal &&
it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
? KMemoryBlockDisableMergeAttribute::Normal
: KMemoryBlockDisableMergeAttribute::None;

while (true) {
// Check that the iterator is valid.
ASSERT(it != m_memory_block_manager.end());
@@ -1930,7 +1905,7 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
clear_merge_attr);
KMemoryBlockDisableMergeAttribute::None);

// We succeeded.
R_SUCCEED();
@@ -2404,7 +2379,8 @@ Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
KScopedPageTableUpdater updater(this);

// Perform mapping operation.
const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
const KPageProperties properties = {perm, state == KMemoryState::Io, false,
DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));

// Update the blocks.
@@ -2446,7 +2422,8 @@ Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMem
KScopedPageTableUpdater updater(this);

// Perform mapping operation.
const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
const KPageProperties properties = {perm, state == KMemoryState::Io, false,
DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));

// Update the blocks.
@@ -2675,18 +2652,11 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
size_t num_allocator_blocks;
constexpr auto AttributeTestMask =
~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
const KMemoryState state_test_mask =
static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
: 0) |
((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
: 0));
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
std::addressof(old_attr), std::addressof(num_allocator_blocks),
addr, size, state_test_mask, state_test_mask,
KMemoryPermission::None, KMemoryPermission::None,
AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
R_TRY(this->CheckMemoryState(
std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));

// Create an update allocator.
Result allocator_result{ResultSuccess};
@@ -2694,17 +2664,18 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);

// If we need to, perform a change attribute operation.
if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
// Perform operation.
R_TRY(this->Operate(addr, num_pages, old_perm,
OperationType::ChangePermissionsAndRefreshAndFlush, 0));
}
// Determine the new attribute.
const KMemoryAttribute new_attr =
static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
static_cast<KMemoryAttribute>(attr & mask)));

// Perform operation.
this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);

// Update the blocks.
m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
static_cast<KMemoryAttribute>(mask),
static_cast<KMemoryAttribute>(attr));
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
new_attr, KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::None);

R_SUCCEED();
}
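Both sides of the SetMemoryAttribute hunk compute the new attribute with the same masked-merge idiom: bits selected by mask are taken from attr, everything else is preserved. A worked example with hypothetical bit assignments:

```cpp
#include <cstdint>

// The masked update used above: bits selected by `mask` come from `attr`,
// all other bits keep their old value.
constexpr uint32_t UpdateAttr(uint32_t old_attr, uint32_t mask, uint32_t attr) {
    return (old_attr & ~mask) | (attr & mask);
}

// Hypothetical bit assignments, for illustration only:
constexpr uint32_t Uncached = 1u << 3;
constexpr uint32_t Locked = 1u << 0;

static_assert(UpdateAttr(Locked, Uncached, Uncached) == (Locked | Uncached)); // set a bit
static_assert(UpdateAttr(Locked | Uncached, Uncached, 0) == Locked);          // clear a bit
```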
@@ -2892,8 +2863,7 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress
&KMemoryBlock::ShareToDevice, KMemoryPermission::None);

// Set whether the locked memory was io.
*out_is_io =
static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
*out_is_io = old_state == KMemoryState::Io;

R_SUCCEED();
}
@@ -3051,10 +3021,9 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGr
ASSERT(num_pages == page_group.GetNumPages());

switch (operation) {
case OperationType::MapGroup:
case OperationType::MapFirstGroup: {
case OperationType::MapGroup: {
// We want to maintain a new reference to every page in the group.
KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
KScopedPageGroup spg(page_group);

for (const auto& node : page_group) {
const size_t size{node.GetNumPages() * PageSize};
@@ -3096,6 +3065,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
break;
}
case OperationType::MapFirst:
case OperationType::Map: {
ASSERT(map_addr);
ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
@@ -3103,7 +3073,11 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis

// Open references to pages, if we should.
if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
m_kernel.MemoryManager().Open(map_addr, num_pages);
if (operation == OperationType::MapFirst) {
m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
} else {
m_kernel.MemoryManager().Open(map_addr, num_pages);
}
}
break;
}
@@ -3113,7 +3087,6 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
}
case OperationType::ChangePermissions:
case OperationType::ChangePermissionsAndRefresh:
case OperationType::ChangePermissionsAndRefreshAndFlush:
break;
default:
ASSERT(false);
@@ -3133,79 +3106,79 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
}
}

KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
switch (state) {
case Svc::MemoryState::Free:
case Svc::MemoryState::Kernel:
case KMemoryState::Free:
case KMemoryState::Kernel:
return m_address_space_start;
case Svc::MemoryState::Normal:
case KMemoryState::Normal:
return m_heap_region_start;
case Svc::MemoryState::Ipc:
case Svc::MemoryState::NonSecureIpc:
case Svc::MemoryState::NonDeviceIpc:
case KMemoryState::Ipc:
case KMemoryState::NonSecureIpc:
case KMemoryState::NonDeviceIpc:
return m_alias_region_start;
case Svc::MemoryState::Stack:
case KMemoryState::Stack:
return m_stack_region_start;
case Svc::MemoryState::Static:
case Svc::MemoryState::ThreadLocal:
case KMemoryState::Static:
case KMemoryState::ThreadLocal:
return m_kernel_map_region_start;
case Svc::MemoryState::Io:
case Svc::MemoryState::Shared:
case Svc::MemoryState::AliasCode:
case Svc::MemoryState::AliasCodeData:
case Svc::MemoryState::Transfered:
case Svc::MemoryState::SharedTransfered:
case Svc::MemoryState::SharedCode:
case Svc::MemoryState::GeneratedCode:
case Svc::MemoryState::CodeOut:
case Svc::MemoryState::Coverage:
case Svc::MemoryState::Insecure:
case KMemoryState::Io:
case KMemoryState::Shared:
case KMemoryState::AliasCode:
case KMemoryState::AliasCodeData:
case KMemoryState::Transfered:
case KMemoryState::SharedTransfered:
case KMemoryState::SharedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
return m_alias_code_region_start;
case Svc::MemoryState::Code:
case Svc::MemoryState::CodeData:
case KMemoryState::Code:
case KMemoryState::CodeData:
return m_code_region_start;
default:
UNREACHABLE();
}
}

size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
size_t KPageTable::GetRegionSize(KMemoryState state) const {
switch (state) {
case Svc::MemoryState::Free:
case Svc::MemoryState::Kernel:
case KMemoryState::Free:
case KMemoryState::Kernel:
return m_address_space_end - m_address_space_start;
case Svc::MemoryState::Normal:
case KMemoryState::Normal:
return m_heap_region_end - m_heap_region_start;
case Svc::MemoryState::Ipc:
case Svc::MemoryState::NonSecureIpc:
case Svc::MemoryState::NonDeviceIpc:
case KMemoryState::Ipc:
case KMemoryState::NonSecureIpc:
case KMemoryState::NonDeviceIpc:
return m_alias_region_end - m_alias_region_start;
case Svc::MemoryState::Stack:
case KMemoryState::Stack:
return m_stack_region_end - m_stack_region_start;
case Svc::MemoryState::Static:
case Svc::MemoryState::ThreadLocal:
case KMemoryState::Static:
case KMemoryState::ThreadLocal:
return m_kernel_map_region_end - m_kernel_map_region_start;
case Svc::MemoryState::Io:
case Svc::MemoryState::Shared:
case Svc::MemoryState::AliasCode:
case Svc::MemoryState::AliasCodeData:
case Svc::MemoryState::Transfered:
case Svc::MemoryState::SharedTransfered:
case Svc::MemoryState::SharedCode:
case Svc::MemoryState::GeneratedCode:
case Svc::MemoryState::CodeOut:
case Svc::MemoryState::Coverage:
case Svc::MemoryState::Insecure:
case KMemoryState::Io:
case KMemoryState::Shared:
case KMemoryState::AliasCode:
case KMemoryState::AliasCodeData:
case KMemoryState::Transfered:
case KMemoryState::SharedTransfered:
case KMemoryState::SharedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
return m_alias_code_region_end - m_alias_code_region_start;
case Svc::MemoryState::Code:
case Svc::MemoryState::CodeData:
case KMemoryState::Code:
case KMemoryState::CodeData:
return m_code_region_end - m_code_region_start;
default:
UNREACHABLE();
}
}

bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
const KProcessAddress end = addr + size;
const KProcessAddress last = end - 1;

@@ -3219,32 +3192,32 @@ bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState
const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
m_alias_region_start == m_alias_region_end);
switch (state) {
case Svc::MemoryState::Free:
case Svc::MemoryState::Kernel:
case KMemoryState::Free:
case KMemoryState::Kernel:
return is_in_region;
case Svc::MemoryState::Io:
case Svc::MemoryState::Static:
case Svc::MemoryState::Code:
case Svc::MemoryState::CodeData:
case Svc::MemoryState::Shared:
case Svc::MemoryState::AliasCode:
case Svc::MemoryState::AliasCodeData:
case Svc::MemoryState::Stack:
case Svc::MemoryState::ThreadLocal:
case Svc::MemoryState::Transfered:
case Svc::MemoryState::SharedTransfered:
case Svc::MemoryState::SharedCode:
case Svc::MemoryState::GeneratedCode:
case Svc::MemoryState::CodeOut:
case Svc::MemoryState::Coverage:
case Svc::MemoryState::Insecure:
case KMemoryState::Io:
case KMemoryState::Static:
case KMemoryState::Code:
case KMemoryState::CodeData:
case KMemoryState::Shared:
case KMemoryState::AliasCode:
case KMemoryState::AliasCodeData:
case KMemoryState::Stack:
case KMemoryState::ThreadLocal:
case KMemoryState::Transfered:
case KMemoryState::SharedTransfered:
case KMemoryState::SharedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
return is_in_region && !is_in_heap && !is_in_alias;
case Svc::MemoryState::Normal:
case KMemoryState::Normal:
ASSERT(is_in_heap);
return is_in_region && !is_in_alias;
case Svc::MemoryState::Ipc:
case Svc::MemoryState::NonSecureIpc:
case Svc::MemoryState::NonDeviceIpc:
case KMemoryState::Ipc:
case KMemoryState::NonSecureIpc:
case KMemoryState::NonDeviceIpc:
ASSERT(is_in_alias);
return is_in_region && !is_in_heap;
default:
@@ -3308,16 +3281,21 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProces

Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KMemoryBlockManager::const_iterator it,
KProcessAddress last_addr, KMemoryState state_mask,
KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());

// Get information about the first block.
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();

// If the start address isn't aligned, we need a block.
const size_t blocks_for_start_align =
(Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;

// Validate all blocks in the range have correct state.
const KMemoryState first_state = info.m_state;
const KMemoryPermission first_perm = info.m_permission;
@@ -3343,6 +3321,10 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
info = it->GetMemoryInfo();
}

// If the end address isn't aligned, we need a block.
const size_t blocks_for_end_align =
(Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;

// Write output state.
if (out_state != nullptr) {
*out_state = first_state;
@@ -3353,39 +3335,9 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
if (out_attr != nullptr) {
*out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
}

// If the end address isn't aligned, we need a block.
if (out_blocks_needed != nullptr) {
const size_t blocks_for_end_align =
(Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
? 1
: 0;
*out_blocks_needed = blocks_for_end_align;
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
}

R_SUCCEED();
}

Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());

// Check memory state.
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));

// If the start address isn't aligned, we need a block.
if (out_blocks_needed != nullptr &&
Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
++(*out_blocks_needed);
}

R_SUCCEED();
}

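The blocks_for_start_align and blocks_for_end_align counters above reserve room for the splits that an update not aligned to existing block boundaries forces. A worked example under the usual 4 KiB page size (values chosen for illustration):

```cpp
#include <cstdint>

constexpr uint64_t PageSize = 0x1000;

constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v & ~(a - 1); }
constexpr uint64_t AlignUp(uint64_t v, uint64_t a) { return AlignDown(v + a - 1, a); }

// A one-page update in the middle of a three-page block [0x10000, 0x13000)
// must split the block at both ends, so two extra KMemoryBlocks are needed.
constexpr uint64_t block_start = 0x10000, block_end = 0x13000;
constexpr uint64_t addr = 0x11000, size = 0x1000;

constexpr int blocks_for_start_align = (AlignDown(addr, PageSize) != block_start) ? 1 : 0;
constexpr int blocks_for_end_align = (AlignUp(addr + size, PageSize) != block_end) ? 1 : 0;
static_assert(blocks_for_start_align + blocks_for_end_align == 2);
```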
@@ -63,7 +63,7 @@ public:
explicit KPageTable(Core::System& system_);
~KPageTable();

Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
KProcessAddress code_addr, size_t code_size,
KSystemResource* system_resource, KResourceLimit* resource_limit,
@@ -126,6 +126,8 @@ public:
return m_block_info_manager;
}

bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;

Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, KProcessAddress region_start,
size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@@ -160,21 +162,6 @@ public:
void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
const KPageGroup& pg);

KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
size_t GetRegionSize(Svc::MemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;

KProcessAddress GetRegionAddress(KMemoryState state) const {
return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}
size_t GetRegionSize(KMemoryState state) const {
return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
return this->CanContain(addr, size,
static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}

protected:
struct PageLinkedList {
private:
@@ -217,13 +204,12 @@ protected:
private:
enum class OperationType : u32 {
Map = 0,
MapGroup = 1,
MapFirstGroup = 2,
MapFirst = 1,
MapGroup = 2,
Unmap = 3,
ChangePermissions = 4,
ChangePermissionsAndRefresh = 5,
ChangePermissionsAndRefreshAndFlush = 6,
Separate = 7,
Separate = 6,
};

static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -242,6 +228,8 @@ private:
Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
OperationType operation, KPhysicalAddress map_addr = 0);
void FinalizeUpdate(PageLinkedList* page_list);
KProcessAddress GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;

KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
size_t num_pages, size_t alignment, size_t offset,
@@ -262,13 +250,6 @@ private:
Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KProcessAddress addr, size_t size, KMemoryState state_mask,
@@ -400,7 +381,7 @@ public:
constexpr size_t GetAliasCodeRegionSize() const {
return m_alias_code_region_end - m_alias_code_region_start;
}
size_t GetNormalMemorySize() const {
size_t GetNormalMemorySize() {
KScopedLightLock lk(m_general_lock);
return GetHeapSize() + m_mapped_physical_memory_size;
}

File diff suppressed because it is too large
@@ -1,23 +1,59 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-FileCopyrightText: 2015 Citra Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <cstddef>
#include <list>
#include <map>

#include "core/hle/kernel/code_set.h"
#include <string>
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_page_table_manager.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"

namespace Core {
namespace Memory {
class Memory;
};

class System;
} // namespace Core

namespace FileSys {
class ProgramMetadata;
}

namespace Kernel {

class KernelCore;
class KResourceLimit;
class KThread;
class KSharedMemoryInfo;
class TLSPage;

struct CodeSet;

enum class MemoryRegion : u16 {
APPLICATION = 1,
SYSTEM = 2,
BASE = 3,
};

enum class ProcessActivity : u32 {
Runnable,
Paused,
};

enum class DebugWatchpointType : u8 {
None = 0,
Read = 1 << 0,
@@ -36,6 +72,9 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);

public:
explicit KProcess(KernelCore& kernel);
~KProcess() override;

enum class State {
Created = static_cast<u32>(Svc::ProcessState::Created),
CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
@@ -47,83 +86,337 @@ public:
DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
};

using ThreadList = Common::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;
enum : u64 {
/// Lowest allowed process ID for a kernel initial process.
InitialKIPIDMin = 1,
/// Highest allowed process ID for a kernel initial process.
InitialKIPIDMax = 80,

static constexpr size_t AslrAlignment = 2_MiB;
/// Lowest allowed process ID for a userland process.
ProcessIDMin = 81,
/// Highest allowed process ID for a userland process.
ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
};

public:
static constexpr u64 InitialProcessIdMin = 1;
static constexpr u64 InitialProcessIdMax = 0x50;
// Used to determine how process IDs are assigned.
enum class ProcessType {
KernelInternal,
Userland,
};

static constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
static constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;

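Both spellings of the ID ranges above encode the same split: IDs 1 through 0x50 (80) are reserved for kernel initial processes, and userland process IDs start at 81. A compact restatement:

```cpp
#include <cstdint>

constexpr uint64_t InitialProcessIdMin = 1;
constexpr uint64_t InitialProcessIdMax = 0x50; // 80
constexpr uint64_t ProcessIdMin = InitialProcessIdMax + 1;

constexpr bool IsInitialProcessId(uint64_t id) {
    return InitialProcessIdMin <= id && id <= InitialProcessIdMax;
}

static_assert(InitialProcessIdMax == 80);
static_assert(ProcessIdMin == 81);
static_assert(IsInitialProcessId(80) && !IsInitialProcessId(81));
```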
static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
ProcessType type, KResourceLimit* res_limit);

/// Gets a reference to the process' page table.
KPageTable& GetPageTable() {
return m_page_table;
}

/// Gets a const reference to the process' page table.
const KPageTable& GetPageTable() const {
return m_page_table;
}

/// Gets a reference to the process' handle table.
KHandleTable& GetHandleTable() {
return m_handle_table;
}

/// Gets a const reference to the process' handle table.
const KHandleTable& GetHandleTable() const {
return m_handle_table;
}

/// Gets a reference to the process' memory.
Core::Memory::Memory& GetMemory() const;

Result SignalToAddress(KProcessAddress address) {
return m_condition_var.SignalToAddress(address);
}

Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
return m_condition_var.WaitForAddress(handle, address, tag);
}

void SignalConditionVariable(u64 cv_key, int32_t count) {
return m_condition_var.Signal(cv_key, count);
}

Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
}

Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
s32 count) {
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
}

Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}

KProcessAddress GetProcessLocalRegionAddress() const {
return m_plr_address;
}

/// Gets the current status of the process
State GetState() const {
return m_state;
}

/// Gets the unique ID that identifies this particular process.
u64 GetProcessId() const {
return m_process_id;
}

/// Gets the program ID corresponding to this process.
u64 GetProgramId() const {
return m_program_id;
}

KProcessAddress GetEntryPoint() const {
return m_code_address;
}

/// Gets the resource limit descriptor for this process
KResourceLimit* GetResourceLimit() const;

/// Gets the ideal CPU core ID for this process
u8 GetIdealCoreId() const {
return m_ideal_core;
}

/// Checks if the specified thread priority is valid.
bool CheckThreadPriority(s32 prio) const {
return ((1ULL << prio) & GetPriorityMask()) != 0;
}

/// Gets the bitmask of allowed cores that this process' threads can run on.
|
||||
u64 GetCoreMask() const {
|
||||
return m_capabilities.GetCoreMask();
|
||||
}
|
||||
|
||||
/// Gets the bitmask of allowed thread priorities.
|
||||
u64 GetPriorityMask() const {
|
||||
return m_capabilities.GetPriorityMask();
|
||||
}
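Both masks are plain 64-bit bitsets: bit `n` of the priority mask means priority `n` is allowed, and bit `n` of the core mask means core `n` is schedulable. A standalone sketch of the same test `CheckThreadPriority` performs, with a made-up mask value for illustration:

```cpp
#include <cstdint>
#include <iostream>

// Returns true if bit `prio` is set in `priority_mask`, mirroring
// KProcess::CheckThreadPriority above.
bool IsPriorityAllowed(uint64_t priority_mask, int prio) {
    return ((1ULL << prio) & priority_mask) != 0;
}

int main() {
    // Example mask permitting priorities 24 through 59 (illustrative only).
    uint64_t mask = 0;
    for (int p = 24; p <= 59; ++p) {
        mask |= 1ULL << p;
    }
    std::cout << IsPriorityAllowed(mask, 28) << '\n'; // 1
    std::cout << IsPriorityAllowed(mask, 3) << '\n';  // 0
}
```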

    /// Gets the amount of secure memory to allocate for memory management.
    u32 GetSystemResourceSize() const {
        return m_system_resource_size;
    }

    /// Gets the amount of secure memory currently in use for memory management.
    u32 GetSystemResourceUsage() const {
        // On hardware, this returns the amount of system resource memory that has
        // been used by the kernel. This is problematic for Yuzu to emulate, because
        // system resource memory is used for page tables -- and yuzu doesn't really
        // have a way to calculate how much memory is required for page tables for
        // the current process at any given time.
        // TODO: Is this even worth implementing? Games may retrieve this value via
        // an SDK function that gets used + available system resource size for debug
        // or diagnostic purposes. However, it seems unlikely that a game would make
        // decisions based on how much system memory is dedicated to its page tables.
        // Is returning a value other than zero wise?
        return 0;
    }

    /// Whether this process is an AArch64 or AArch32 process.
    bool Is64BitProcess() const {
        return m_is_64bit_process;
    }

    bool IsSuspended() const {
        return m_is_suspended;
    }

    void SetSuspended(bool suspended) {
        m_is_suspended = suspended;
    }

    /// Gets the total running time of the process instance in ticks.
    u64 GetCPUTimeTicks() const {
        return m_total_process_running_time_ticks;
    }

    /// Updates the total running time, adding the given ticks to it.
    void UpdateCPUTimeTicks(u64 ticks) {
        m_total_process_running_time_ticks += ticks;
    }

    /// Gets the process schedule count, used for thread yielding
    s64 GetScheduledCount() const {
        return m_schedule_count;
    }

    /// Increments the process schedule count, used for thread yielding.
    void IncrementScheduledCount() {
        ++m_schedule_count;
    }

    void IncrementRunningThreadCount();
    void DecrementRunningThreadCount();

    void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
        m_running_threads[core] = thread;
        m_running_thread_idle_counts[core] = idle_count;
    }

    void ClearRunningThread(KThread* thread) {
        for (size_t i = 0; i < m_running_threads.size(); ++i) {
            if (m_running_threads[i] == thread) {
                m_running_threads[i] = nullptr;
            }
        }
    }

    [[nodiscard]] KThread* GetRunningThread(s32 core) const {
        return m_running_threads[core];
    }

    bool ReleaseUserException(KThread* thread);

    [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
        return m_pinned_threads[core_id];
    }

    /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
    u64 GetRandomEntropy(std::size_t index) const {
        return m_random_entropy.at(index);
    }

    /// Retrieves the total physical memory available to this process in bytes.
    u64 GetTotalPhysicalMemoryAvailable();

    /// Retrieves the total physical memory available to this process in bytes,
    /// without the size of the personal system resource heap added to it.
    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();

    /// Retrieves the total physical memory used by this process in bytes.
    u64 GetTotalPhysicalMemoryUsed();

    /// Retrieves the total physical memory used by this process in bytes,
    /// without the size of the personal system resource heap added to it.
    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();

    /// Gets the list of all threads created with this process as their owner.
    std::list<KThread*>& GetThreadList() {
        return m_thread_list;
    }

    /// Registers a thread as being created under this process,
    /// adding it to this process' thread list.
    void RegisterThread(KThread* thread);

    /// Unregisters a thread from this process, removing it
    /// from this process' thread list.
    void UnregisterThread(KThread* thread);

    /// Retrieves the number of available threads for this process.
    u64 GetFreeThreadCount() const;

    /// Clears the signaled state of the process if and only if it's signaled.
    ///
    /// @pre The process must not be already terminated. If this is called on a
    ///      terminated process, then ResultInvalidState will be returned.
    ///
    /// @pre The process must be in a signaled state. If this is called on a
    ///      process instance that is not signaled, ResultInvalidState will be
    ///      returned.
    Result Reset();

    /**
     * Loads process-specific configuration info with metadata provided
     * by an executable.
     *
     * @param metadata The provided metadata to load process specific info from.
     *
     * @returns ResultSuccess if all relevant metadata was able to be
     *          loaded and parsed. Otherwise, an error code is returned.
     */
    Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
                            bool is_hbl);

    /**
     * Starts the main application thread for this process.
     *
     * @param main_thread_priority The priority for the main thread.
     * @param stack_size The stack size for the main thread in bytes.
     */
    void Run(s32 main_thread_priority, u64 stack_size);

    /**
     * Prepares a process for termination by stopping all of its threads
     * and clearing any other resources.
     */
    void PrepareForTermination();

    void LoadModule(CodeSet code_set, KProcessAddress base_addr);

    bool IsInitialized() const override {
        return m_is_initialized;
    }

    static void PostDestroy(uintptr_t arg) {}

    void Finalize() override;

    u64 GetId() const override {
        return GetProcessId();
    }

    bool IsHbl() const {
        return m_is_hbl;
    }

    bool IsSignaled() const override;

    void DoWorkerTaskImpl();

    Result SetActivity(ProcessActivity activity);

    void PinCurrentThread(s32 core_id);
    void UnpinCurrentThread(s32 core_id);
    void UnpinThread(KThread* thread);

    KLightLock& GetStateLock() {
        return m_state_lock;
    }

    Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
    void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Thread-local storage management

    // Marks the next available region as used and returns the address of the slot.
    [[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);

    // Frees a used TLS slot identified by the given address
    Result DeleteThreadLocalRegion(KProcessAddress addr);

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Debug watchpoint management

    // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
    bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);

    // Attempts to remove the watchpoint specified by the given parameters.
    bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);

    const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
        return m_watchpoints;
    }

    const std::string& GetName() {
        return name;
    }

private:
    using SharedMemoryInfoList = Common::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
    using TLPTree =
        Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
    using TLPIterator = TLPTree::iterator;

private:
    KPageTable m_page_table;
    std::atomic<size_t> m_used_kernel_memory_size{};
    TLPTree m_fully_used_tlp_tree{};
    TLPTree m_partially_used_tlp_tree{};
    s32 m_ideal_core_id{};
    KResourceLimit* m_resource_limit{};
    KSystemResource* m_system_resource{};
    size_t m_memory_release_hint{};
    State m_state{};
    KLightLock m_state_lock;
    KLightLock m_list_lock;
    KConditionVariable m_cond_var;
    KAddressArbiter m_address_arbiter;
    std::array<u64, 4> m_entropy{};
    bool m_is_signaled{};
    bool m_is_initialized{};
    bool m_is_application{};
    bool m_is_default_application_system_resource{};
    bool m_is_hbl{};
    std::array<char, 13> m_name{};
    std::atomic<u16> m_num_running_threads{};
    Svc::CreateProcessFlag m_flags{};
    KMemoryManager::Pool m_memory_pool{};
    s64 m_schedule_count{};
    KCapabilities m_capabilities{};
    u64 m_program_id{};
    u64 m_process_id{};
    KProcessAddress m_code_address{};
    size_t m_code_size{};
    size_t m_main_thread_stack_size{};
    size_t m_max_process_memory{};
    u32 m_version{};
    KHandleTable m_handle_table;
    KProcessAddress m_plr_address{};
    KThread* m_exception_thread{};
    ThreadList m_thread_list{};
    SharedMemoryInfoList m_shared_memory_list{};
    bool m_is_suspended{};
    bool m_is_immortal{};
    bool m_is_handle_table_initialized{};
    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
    std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
    std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_switch_counts{};
    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
    std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
    std::map<KProcessAddress, u64> m_debug_page_refcounts{};
    std::atomic<s64> m_cpu_time{};
    std::atomic<s64> m_num_process_switches{};
    std::atomic<s64> m_num_thread_switches{};
    std::atomic<s64> m_num_fpu_switches{};
    std::atomic<s64> m_num_supervisor_calls{};
    std::atomic<s64> m_num_ipc_messages{};
    std::atomic<s64> m_num_ipc_replies{};
    std::atomic<s64> m_num_ipc_receives{};

private:
    Result StartTermination();
    void FinishTermination();

    void PinThread(s32 core_id, KThread* thread) {
        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
        ASSERT(thread != nullptr);
@@ -138,395 +431,6 @@ private:
        m_pinned_threads[core_id] = nullptr;
    }

public:
    explicit KProcess(KernelCore& kernel);
    ~KProcess() override;

    Result Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
                      bool is_real);

    Result Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
                      std::span<const u32> caps, KResourceLimit* res_limit,
                      KMemoryManager::Pool pool, bool immortal);
    Result Initialize(const Svc::CreateProcessParameter& params, std::span<const u32> user_caps,
                      KResourceLimit* res_limit, KMemoryManager::Pool pool);
    void Exit();

    const char* GetName() const {
        return m_name.data();
    }

    u64 GetProgramId() const {
        return m_program_id;
    }

    u64 GetProcessId() const {
        return m_process_id;
    }

    State GetState() const {
        return m_state;
    }

    u64 GetCoreMask() const {
        return m_capabilities.GetCoreMask();
    }
    u64 GetPhysicalCoreMask() const {
        return m_capabilities.GetPhysicalCoreMask();
    }
    u64 GetPriorityMask() const {
        return m_capabilities.GetPriorityMask();
    }

    s32 GetIdealCoreId() const {
        return m_ideal_core_id;
    }
    void SetIdealCoreId(s32 core_id) {
        m_ideal_core_id = core_id;
    }

    bool CheckThreadPriority(s32 prio) const {
        return ((1ULL << prio) & this->GetPriorityMask()) != 0;
    }

    u32 GetCreateProcessFlags() const {
        return static_cast<u32>(m_flags);
    }

    bool Is64Bit() const {
        return True(m_flags & Svc::CreateProcessFlag::Is64Bit);
    }

    KProcessAddress GetEntryPoint() const {
        return m_code_address;
    }

    size_t GetMainStackSize() const {
        return m_main_thread_stack_size;
    }

    KMemoryManager::Pool GetMemoryPool() const {
        return m_memory_pool;
    }

    u64 GetRandomEntropy(size_t i) const {
        return m_entropy[i];
    }

    bool IsApplication() const {
        return m_is_application;
    }

    bool IsDefaultApplicationSystemResource() const {
        return m_is_default_application_system_resource;
    }

    bool IsSuspended() const {
        return m_is_suspended;
    }
    void SetSuspended(bool suspended) {
        m_is_suspended = suspended;
    }

    Result Terminate();

    bool IsTerminated() const {
        return m_state == State::Terminated;
    }

    bool IsPermittedSvc(u32 svc_id) const {
        return m_capabilities.IsPermittedSvc(svc_id);
    }

    bool IsPermittedInterrupt(s32 interrupt_id) const {
        return m_capabilities.IsPermittedInterrupt(interrupt_id);
    }

    bool IsPermittedDebug() const {
        return m_capabilities.IsPermittedDebug();
    }

    bool CanForceDebug() const {
        return m_capabilities.CanForceDebug();
    }

    bool IsHbl() const {
        return m_is_hbl;
    }

    Kernel::KMemoryManager::Direction GetAllocateOption() const {
        // TODO: property of the KPageTableBase
        return KMemoryManager::Direction::FromFront;
    }

    ThreadList& GetThreadList() {
        return m_thread_list;
    }
    const ThreadList& GetThreadList() const {
        return m_thread_list;
    }

    bool EnterUserException();
    bool LeaveUserException();
    bool ReleaseUserException(KThread* thread);

    KThread* GetPinnedThread(s32 core_id) const {
        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
        return m_pinned_threads[core_id];
    }

    const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
        return m_capabilities.GetSvcPermissions();
    }

    KResourceLimit* GetResourceLimit() const {
        return m_resource_limit;
    }

    bool ReserveResource(Svc::LimitableResource which, s64 value);
    bool ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout);
    void ReleaseResource(Svc::LimitableResource which, s64 value);
    void ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint);

    KLightLock& GetStateLock() {
        return m_state_lock;
    }
    KLightLock& GetListLock() {
        return m_list_lock;
    }

    KPageTable& GetPageTable() {
        return m_page_table;
    }
    const KPageTable& GetPageTable() const {
        return m_page_table;
    }

    KHandleTable& GetHandleTable() {
        return m_handle_table;
    }
    const KHandleTable& GetHandleTable() const {
        return m_handle_table;
    }

    size_t GetUsedUserPhysicalMemorySize() const;
    size_t GetTotalUserPhysicalMemorySize() const;
    size_t GetUsedNonSystemUserPhysicalMemorySize() const;
    size_t GetTotalNonSystemUserPhysicalMemorySize() const;

    Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
    void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);

    Result CreateThreadLocalRegion(KProcessAddress* out);
    Result DeleteThreadLocalRegion(KProcessAddress addr);

    KProcessAddress GetProcessLocalRegionAddress() const {
        return m_plr_address;
    }

    KThread* GetExceptionThread() const {
        return m_exception_thread;
    }

    void AddCpuTime(s64 diff) {
        m_cpu_time += diff;
    }
    s64 GetCpuTime() {
        return m_cpu_time.load();
    }

    s64 GetScheduledCount() const {
        return m_schedule_count;
    }
    void IncrementScheduledCount() {
        ++m_schedule_count;
    }

    void IncrementRunningThreadCount();
    void DecrementRunningThreadCount();

    size_t GetRequiredSecureMemorySizeNonDefault() const {
        if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
            auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
            return secure_system_resource->CalculateRequiredSecureMemorySize();
        }

        return 0;
    }

    size_t GetRequiredSecureMemorySize() const {
        if (m_system_resource->IsSecureResource()) {
            auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
            return secure_system_resource->CalculateRequiredSecureMemorySize();
        }

        return 0;
    }

    size_t GetTotalSystemResourceSize() const {
        if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
            auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
            return secure_system_resource->GetSize();
        }

        return 0;
    }

    size_t GetUsedSystemResourceSize() const {
        if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
            auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
            return secure_system_resource->GetUsedSize();
        }

        return 0;
    }

    void SetRunningThread(s32 core, KThread* thread, u64 idle_count, u64 switch_count) {
        m_running_threads[core] = thread;
        m_running_thread_idle_counts[core] = idle_count;
        m_running_thread_switch_counts[core] = switch_count;
    }

    void ClearRunningThread(KThread* thread) {
        for (size_t i = 0; i < m_running_threads.size(); ++i) {
            if (m_running_threads[i] == thread) {
                m_running_threads[i] = nullptr;
            }
        }
    }

    const KSystemResource& GetSystemResource() const {
        return *m_system_resource;
    }

    const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const {
        return m_system_resource->GetMemoryBlockSlabManager();
    }
    const KBlockInfoManager& GetBlockInfoManager() const {
        return m_system_resource->GetBlockInfoManager();
    }
    const KPageTableManager& GetPageTableManager() const {
        return m_system_resource->GetPageTableManager();
    }

    KThread* GetRunningThread(s32 core) const {
        return m_running_threads[core];
    }
    u64 GetRunningThreadIdleCount(s32 core) const {
        return m_running_thread_idle_counts[core];
    }
    u64 GetRunningThreadSwitchCount(s32 core) const {
        return m_running_thread_switch_counts[core];
    }

    void RegisterThread(KThread* thread);
    void UnregisterThread(KThread* thread);

    Result Run(s32 priority, size_t stack_size);

    Result Reset();

    void SetDebugBreak() {
        if (m_state == State::RunningAttached) {
            this->ChangeState(State::DebugBreak);
        }
    }

    void SetAttached() {
        if (m_state == State::DebugBreak) {
            this->ChangeState(State::RunningAttached);
        }
    }

    Result SetActivity(Svc::ProcessActivity activity);

    void PinCurrentThread();
    void UnpinCurrentThread();
    void UnpinThread(KThread* thread);

    void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
        return m_cond_var.Signal(cv_key, count);
    }

    Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
        R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
    }

    Result SignalAddressArbiter(uintptr_t address, Svc::SignalType signal_type, s32 value,
                                s32 count) {
        R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
    }

    Result WaitAddressArbiter(uintptr_t address, Svc::ArbitrationType arb_type, s32 value,
                              s64 timeout) {
        R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
    }

    Result GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids, s32 max_out_count);

    static void Switch(KProcess* cur_process, KProcess* next_process);

public:
    // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
    bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);

    // Attempts to remove the watchpoint specified by the given parameters.
    bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);

    const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
        return m_watchpoints;
    }

public:
    Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
                            bool is_hbl);

    void LoadModule(CodeSet code_set, KProcessAddress base_addr);

    Core::Memory::Memory& GetMemory() const;

public:
    // Overridden parent functions.
    bool IsInitialized() const override {
        return m_is_initialized;
    }

    static void PostDestroy(uintptr_t arg) {}

    void Finalize() override;

    u64 GetIdImpl() const {
        return this->GetProcessId();
    }
    u64 GetId() const override {
        return this->GetIdImpl();
    }

    virtual bool IsSignaled() const override {
        ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
        return m_is_signaled;
    }

    void DoWorkerTaskImpl();

private:
    void ChangeState(State new_state) {
        if (m_state != new_state) {
            m_state = new_state;
            m_is_signaled = true;
            this->NotifyAvailable();
        }
    }

    Result InitializeHandleTable(s32 size) {
        // Try to initialize the handle table.
        R_TRY(m_handle_table.Initialize(size));

        // We succeeded, so note that we did.
        m_is_handle_table_initialized = true;
        R_SUCCEED();
    }
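`InitializeHandleTable` shows the Result-macro idiom used throughout this header: `R_TRY` early-returns a failing `Result`, `R_SUCCEED()` returns success, and `R_UNLESS`/`R_RETURN` appear in the accessors above. A rough standalone approximation of how such macros typically expand; the exact yuzu definitions live in its result headers and differ in detail:

```cpp
#include <cstdio>

struct Result {
    int code;
    bool IsSuccess() const { return code == 0; }
};
constexpr Result ResultSuccess{0};

// Simplified stand-ins for the real macros; illustration only.
#define R_SUCCEED() return ResultSuccess
#define R_TRY(expr)                                                                               \
    do {                                                                                          \
        const Result r_try_result = (expr);                                                       \
        if (!r_try_result.IsSuccess()) {                                                          \
            return r_try_result;                                                                  \
        }                                                                                         \
    } while (0)

Result StepThatMayFail(bool ok) { return Result{ok ? 0 : 1}; }

Result InitializeSomething(bool ok) {
    R_TRY(StepThatMayFail(ok)); // propagate failure to the caller
    std::puts("initialized");
    R_SUCCEED();
}
```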

    void FinalizeHandleTable() {
        // Finalize the table.
        m_handle_table.Finalize();
@@ -534,6 +438,118 @@ private:
        // Note that the table is finalized.
        m_is_handle_table_initialized = false;
    }

    void ChangeState(State new_state);

    /// Allocates the main thread stack for the process, given the stack size in bytes.
    Result AllocateMainThreadStack(std::size_t stack_size);

    /// Memory manager for this process
    KPageTable m_page_table;

    /// Current status of the process
    State m_state{};

    /// The ID of this process
    u64 m_process_id = 0;

    /// Title ID corresponding to the process
    u64 m_program_id = 0;

    /// Specifies additional memory to be reserved for the process's memory management by the
    /// system. When this is non-zero, secure memory is allocated and used for page table allocation
    /// instead of using the normal global page tables/memory block management.
    u32 m_system_resource_size = 0;

    /// Resource limit descriptor for this process
    KResourceLimit* m_resource_limit{};

    KVirtualAddress m_system_resource_address{};

    /// The ideal CPU core for this process; threads are scheduled on this core by default.
    u8 m_ideal_core = 0;

    /// Contains the parsed process capability descriptors.
    ProcessCapabilities m_capabilities;

    /// Whether this process is AArch64 or AArch32.
    /// By default, we currently assume this is true, unless otherwise
    /// specified by metadata provided to the process during loading.
    bool m_is_64bit_process = true;

    /// Total running time for the process in ticks.
    std::atomic<u64> m_total_process_running_time_ticks = 0;

    /// Per-process handle table for storing created object handles in.
    KHandleTable m_handle_table;

    /// Per-process address arbiter.
    KAddressArbiter m_address_arbiter;

    /// The per-process mutex lock instance used for handling various
    /// forms of services, such as lock arbitration, and condition
    /// variable related facilities.
    KConditionVariable m_condition_var;

    /// Address indicating the location of the process' dedicated TLS region.
    KProcessAddress m_plr_address = 0;

    /// Address indicating the location of the process's entry point.
    KProcessAddress m_code_address = 0;

    /// Random values for svcGetInfo RandomEntropy
    std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};

    /// List of threads that are running with this process as their owner.
    std::list<KThread*> m_thread_list;

    /// List of shared memory regions owned by this process.
    std::list<KSharedMemoryInfo*> m_shared_memory_list;

    /// Address of the top of the main thread's stack
    KProcessAddress m_main_thread_stack_top{};

    /// Size of the main thread's stack
    std::size_t m_main_thread_stack_size{};

    /// Memory usage capacity for the process
    std::size_t m_memory_usage_capacity{};

    /// Process total image size
    std::size_t m_image_size{};

    /// Schedule count of this process
    s64 m_schedule_count{};

    size_t m_memory_release_hint{};

    std::string name{};

    bool m_is_signaled{};
    bool m_is_suspended{};
    bool m_is_immortal{};
    bool m_is_handle_table_initialized{};
    bool m_is_initialized{};
    bool m_is_hbl{};

    std::atomic<u16> m_num_running_threads{};

    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
    std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
    std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
    std::map<KProcessAddress, u64> m_debug_page_refcounts;

    KThread* m_exception_thread{};

    KLightLock m_state_lock;
    KLightLock m_list_lock;

    using TLPTree =
        Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
    using TLPIterator = TLPTree::iterator;
    TLPTree m_fully_used_tlp_tree;
    TLPTree m_partially_used_tlp_tree;
};

} // namespace Kernel
@@ -190,7 +190,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
    if (m_state.should_count_idle) {
        if (highest_thread != nullptr) [[likely]] {
            if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
                process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count, 0);
                process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
            }
        } else {
            m_state.idle_count++;
@@ -356,7 +356,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
    const s64 tick_diff = cur_tick - prev_tick;
    cur_thread->AddCpuTime(m_core_id, tick_diff);
    if (cur_process != nullptr) {
        cur_process->AddCpuTime(tick_diff);
        cur_process->UpdateCPUTimeTicks(tick_diff);
    }
    m_last_context_switch_time = cur_tick;
@@ -1,100 +1,25 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/core.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_system_resource.h"

namespace Kernel {

Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit,
                                         KMemoryManager::Pool pool) {
    // Set members.
    m_resource_limit = resource_limit;
    m_resource_size = size;
    m_resource_pool = pool;

    // Determine required size for our secure resource.
    const size_t secure_size = this->CalculateRequiredSecureMemorySize();

    // Reserve memory for our secure resource.
    KScopedResourceReservation memory_reservation(
        m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, secure_size);
    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

    // Allocate secure memory.
    R_TRY(KSystemControl::AllocateSecureMemory(m_kernel, std::addressof(m_resource_address),
                                               m_resource_size, static_cast<u32>(m_resource_pool)));
    ASSERT(m_resource_address != 0);

    // Ensure we clean up the secure memory, if we fail past this point.
    ON_RESULT_FAILURE {
        KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
                                         static_cast<u32>(m_resource_pool));
    };

    // Check that our allocation is bigger than the reference counts needed for it.
    const size_t rc_size =
        Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(m_resource_size), PageSize);
    R_UNLESS(m_resource_size > rc_size, ResultOutOfMemory);

    // Get resource pointer.
    KPhysicalAddress resource_paddr =
        KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address);
    auto* resource =
        m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);

    // Initialize slab heaps.
    m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size,
                                      PageSize);
    m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, resource);
    m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
    m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);

    // Initialize managers.
    m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager),
                                    std::addressof(m_page_table_heap));
    m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager),
                                           std::addressof(m_memory_block_heap));
    m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager),
                                    std::addressof(m_block_info_heap));

    // Set our managers.
    this->SetManagers(m_memory_block_slab_manager, m_block_info_manager, m_page_table_manager);

    // Commit the memory reservation.
    memory_reservation.Commit();

    // Open reference to our resource limit.
    m_resource_limit->Open();

    // Set ourselves as initialized.
    m_is_initialized = true;

    R_SUCCEED();
    // Unimplemented
    UNREACHABLE();
}

void KSecureSystemResource::Finalize() {
    // Check that we have no outstanding allocations.
    ASSERT(m_memory_block_slab_manager.GetUsed() == 0);
    ASSERT(m_block_info_manager.GetUsed() == 0);
    ASSERT(m_page_table_manager.GetUsed() == 0);

    // Free our secure memory.
    KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
                                     static_cast<u32>(m_resource_pool));

    // Release the memory reservation.
    m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
                              this->CalculateRequiredSecureMemorySize());

    // Close reference to our resource limit.
    m_resource_limit->Close();
    // Unimplemented
    UNREACHABLE();
}

size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size,
                                                                KMemoryManager::Pool pool) {
    return KSystemControl::CalculateRequiredSecureMemorySize(size, static_cast<u32>(pool));
    // Unimplemented
    UNREACHABLE();
}

} // namespace Kernel
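The `Initialize` routine above follows a commit-or-roll-back shape: reserve against the resource limit, allocate, register an `ON_RESULT_FAILURE` cleanup, and only `Commit()` the reservation once every step has succeeded. A generic RAII sketch of that shape, independent of the kernel types; the names here are illustrative, not yuzu's:

```cpp
#include <cstdio>

// Illustrative stand-in for KScopedResourceReservation: releases the
// reservation on destruction unless Commit() was called.
class ScopedReservation {
public:
    explicit ScopedReservation(long amount) : m_amount(amount) {}
    ~ScopedReservation() {
        if (!m_committed) {
            std::printf("rolling back reservation of %ld\n", m_amount);
        }
    }
    void Commit() { m_committed = true; }

private:
    long m_amount;
    bool m_committed = false;
};

bool InitializeResource(bool allocation_succeeds) {
    ScopedReservation reservation(4096);
    if (!allocation_succeeds) {
        return false; // destructor rolls the reservation back
    }
    reservation.Commit(); // keep the reservation; destructor is now a no-op
    return true;
}
```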
@@ -122,15 +122,16 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
    case ThreadType::Main:
        ASSERT(arg == 0);
        [[fallthrough]];
    case ThreadType::HighPriority:
        [[fallthrough]];
    case ThreadType::Dummy:
        [[fallthrough]];
    case ThreadType::User:
        ASSERT(((owner == nullptr) ||
                (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
        ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
                (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
        break;
    case ThreadType::HighPriority:
    case ThreadType::Dummy:
        break;
    case ThreadType::Kernel:
        UNIMPLEMENTED();
        break;
@@ -215,7 +216,6 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
    // Setup the TLS, if needed.
    if (type == ThreadType::User) {
        R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
        owner->GetMemory().ZeroBlock(m_tls_address, Svc::ThreadLocalRegionSize);
    }

    m_parent = owner;
@@ -403,7 +403,7 @@ void KThread::StartTermination() {
    if (m_parent != nullptr) {
        m_parent->ReleaseUserException(this);
        if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
            m_parent->UnpinCurrentThread();
            m_parent->UnpinCurrentThread(m_core_id);
        }
    }

@@ -415,6 +415,10 @@ void KThread::StartTermination() {
        m_parent->ClearRunningThread(this);
    }

    // Signal.
    m_signaled = true;
    KSynchronizationObject::NotifyAvailable();

    // Clear previous thread in KScheduler.
    KScheduler::ClearPreviousThread(m_kernel, this);

@@ -433,13 +437,6 @@ void KThread::FinishTermination() {
        }
    }

    // Acquire the scheduler lock.
    KScopedSchedulerLock sl{m_kernel};

    // Signal.
    m_signaled = true;
    KSynchronizationObject::NotifyAvailable();

    // Close the thread.
    this->Close();
}
@@ -823,7 +820,7 @@ void KThread::CloneFpuStatus() {
    ASSERT(this->GetOwnerProcess() != nullptr);
    ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));

    if (this->GetOwnerProcess()->Is64Bit()) {
    if (this->GetOwnerProcess()->Is64BitProcess()) {
        // Clone FPSR and FPCR.
        ThreadContext64 cur_ctx{};
        m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
@@ -926,7 +923,7 @@ Result KThread::GetThreadContext3(Common::ScratchBuffer<u8>& out) {

    // If we're not terminating, get the thread's user context.
    if (!this->IsTerminationRequested()) {
        if (m_parent->Is64Bit()) {
        if (m_parent->Is64BitProcess()) {
            // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
            auto context = GetContext64();
            context.pstate &= 0xFF0FFE20;
@@ -1177,9 +1174,6 @@ Result KThread::Run() {
        owner->IncrementRunningThreadCount();
    }

    // Open a reference, now that we're running.
    this->Open();

    // Set our state and finish.
    this->SetState(ThreadState::Runnable);

@@ -721,7 +721,6 @@ private:
    // For core KThread implementation
    ThreadContext32 m_thread_context_32{};
    ThreadContext64 m_thread_context_64{};
    Common::IntrusiveListNode m_process_list_node;
    Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
    s32 m_priority{};
    using ConditionVariableThreadTreeTraits =
@@ -101,31 +101,35 @@ struct KernelCore::Impl {

    void InitializeCores() {
        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            cores[core_id]->Initialize((*application_process).Is64Bit());
            cores[core_id]->Initialize((*application_process).Is64BitProcess());
            system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
        }
    }

    void TerminateApplicationProcess() {
        application_process.load()->Terminate();
    void CloseApplicationProcess() {
        KProcess* old_process = application_process.exchange(nullptr);
        if (old_process == nullptr) {
            return;
        }

        // old_process->Close();
        // TODO: The process should be destroyed based on accurate ref counting after
        // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
        old_process->Finalize();
        old_process->Destroy();
    }

    void Shutdown() {
        is_shutting_down.store(true, std::memory_order_relaxed);
        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });

        CloseServices();

        auto* old_process = application_process.exchange(nullptr);
        if (old_process) {
            old_process->Close();
        }

        process_list.clear();

        CloseServices();

        next_object_id = 0;
        next_kernel_process_id = KProcess::InitialProcessIdMin;
        next_user_process_id = KProcess::ProcessIdMin;
        next_kernel_process_id = KProcess::InitialKIPIDMin;
        next_user_process_id = KProcess::ProcessIDMin;
        next_thread_id = 1;

        global_handle_table->Finalize();
@@ -172,6 +176,8 @@ struct KernelCore::Impl {
            }
        }

        CloseApplicationProcess();

        // Track kernel objects that were not freed on shutdown
        {
            std::scoped_lock lk{registered_objects_lock};
@@ -338,8 +344,6 @@ struct KernelCore::Impl {
        // Create the system page table managers.
        app_system_resource = std::make_unique<KSystemResource>(kernel);
        sys_system_resource = std::make_unique<KSystemResource>(kernel);
        KAutoObject::Create(std::addressof(*app_system_resource));
        KAutoObject::Create(std::addressof(*sys_system_resource));

        // Set the managers for the system resources.
        app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
@@ -619,33 +623,14 @@ struct KernelCore::Impl {
        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
            GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));

        // Insert a physical region for the secure applet memory.
        const auto secure_applet_end_phys_addr =
            slab_end_phys_addr + KSystemControl::SecureAppletMemorySize;
        if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
            ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
                GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize,
                KMemoryRegionType_DramKernelSecureAppletMemory));
        }

        // Insert a physical region for the unknown debug2 region.
        constexpr size_t SecureUnknownRegionSize = 0;
        const size_t secure_unknown_size = SecureUnknownRegionSize;
        const auto secure_unknown_end_phys_addr = secure_applet_end_phys_addr + secure_unknown_size;
        if constexpr (SecureUnknownRegionSize > 0) {
            ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
                GetInteger(secure_applet_end_phys_addr), secure_unknown_size,
                KMemoryRegionType_DramKernelSecureUnknown));
        }

        // Determine size available for kernel page table heaps, requiring > 8 MB.
        const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
        const size_t page_table_heap_size = resource_end_phys_addr - secure_unknown_end_phys_addr;
        const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
        ASSERT(page_table_heap_size / 4_MiB > 2);

        // Insert a physical region for the kernel page table heap region
        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
            GetInteger(secure_unknown_end_phys_addr), page_table_heap_size,
            GetInteger(slab_end_phys_addr), page_table_heap_size,
            KMemoryRegionType_DramKernelPtHeap));

        // All DRAM regions that we haven't tagged by this point will be mapped under the linear
@@ -788,8 +773,8 @@ struct KernelCore::Impl {
    std::mutex registered_in_use_objects_lock;

    std::atomic<u32> next_object_id{0};
    std::atomic<u64> next_kernel_process_id{KProcess::InitialProcessIdMin};
    std::atomic<u64> next_user_process_id{KProcess::ProcessIdMin};
    std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
    std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
    std::atomic<u64> next_thread_id{1};

    // Lists all processes that exist in the current session.
@@ -920,6 +905,10 @@ const KProcess* KernelCore::ApplicationProcess() const {
    return impl->application_process;
}

void KernelCore::CloseApplicationProcess() {
    impl->CloseApplicationProcess();
}

const std::vector<KProcess*>& KernelCore::GetProcessList() const {
    return impl->process_list;
}
@@ -1120,8 +1109,8 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
                                              std::function<void()> func) {
    // Make a new process.
    KProcess* process = KProcess::Create(*this);
    ASSERT(R_SUCCEEDED(
        process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
    ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
                                            GetSystemResourceLimit())));

    // Ensure that we don't hold onto any extra references.
    SCOPE_EXIT({ process->Close(); });
@@ -1148,8 +1137,8 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function

    // Make a new process.
    KProcess* process = KProcess::Create(*this);
    ASSERT(R_SUCCEEDED(
        process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
    ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
                                            GetSystemResourceLimit())));

    // Ensure that we don't hold onto any extra references.
    SCOPE_EXIT({ process->Close(); });
@@ -1258,8 +1247,7 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {

void KernelCore::SuspendApplication(bool suspended) {
    const bool should_suspend{exception_exited || suspended};
    const auto activity =
        should_suspend ? Svc::ProcessActivity::Paused : Svc::ProcessActivity::Runnable;
    const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;

    // Get the application process.
    KScopedAutoObject<KProcess> process = ApplicationProcess();
@@ -1293,8 +1281,6 @@ void KernelCore::SuspendApplication(bool suspended) {
}

void KernelCore::ShutdownCores() {
    impl->TerminateApplicationProcess();

    KScopedSchedulerLock lk{*this};

    for (auto* thread : impl->shutdown_threads) {
@@ -134,6 +134,9 @@ public:
    /// Retrieves a const pointer to the application process.
    const KProcess* ApplicationProcess() const;

    /// Closes the application process.
    void CloseApplicationProcess();

    /// Retrieves the list of processes.
    const std::vector<KProcess*>& GetProcessList() const;
@@ -4426,7 +4426,7 @@ void Call(Core::System& system, u32 imm) {
    auto& kernel = system.Kernel();
    kernel.EnterSVCProfile();

    if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
    if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
        Call64(system, imm);
    } else {
        Call32(system, imm);

@@ -86,19 +86,20 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
        R_SUCCEED();

    case InfoType::TotalMemorySize:
        *result = process->GetTotalUserPhysicalMemorySize();
        *result = process->GetTotalPhysicalMemoryAvailable();
        R_SUCCEED();

    case InfoType::UsedMemorySize:
        *result = process->GetUsedUserPhysicalMemorySize();
        *result = process->GetTotalPhysicalMemoryUsed();
        R_SUCCEED();

    case InfoType::SystemResourceSizeTotal:
        *result = process->GetTotalSystemResourceSize();
        *result = process->GetSystemResourceSize();
        R_SUCCEED();

    case InfoType::SystemResourceSizeUsed:
        *result = process->GetUsedSystemResourceSize();
        LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
        *result = process->GetSystemResourceUsage();
        R_SUCCEED();

    case InfoType::ProgramId:
@@ -110,29 +111,20 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
        R_SUCCEED();

    case InfoType::TotalNonSystemMemorySize:
        *result = process->GetTotalNonSystemUserPhysicalMemorySize();
        *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
        R_SUCCEED();

    case InfoType::UsedNonSystemMemorySize:
        *result = process->GetUsedNonSystemUserPhysicalMemorySize();
        *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
        R_SUCCEED();

    case InfoType::IsApplication:
        LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
        *result = process->IsApplication();
        *result = true;
        R_SUCCEED();

    case InfoType::FreeThreadCount:
        if (KResourceLimit* resource_limit = process->GetResourceLimit();
            resource_limit != nullptr) {
            const auto current_value =
                resource_limit->GetCurrentValue(Svc::LimitableResource::ThreadCountMax);
            const auto limit_value =
                resource_limit->GetLimitValue(Svc::LimitableResource::ThreadCountMax);
            *result = limit_value - current_value;
        } else {
            *result = 0;
        }
        *result = process->GetFreeThreadCount();
        R_SUCCEED();

    default:
@@ -169,7 +161,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle

    case InfoType::RandomEntropy:
        R_UNLESS(handle == 0, ResultInvalidHandle);
        R_UNLESS(info_sub_id < 4, ResultInvalidCombination);
        R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination);

        *result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id);
        R_SUCCEED();
@@ -17,7 +17,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u3
    R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
    R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);

    R_RETURN(KConditionVariable::WaitForAddress(system.Kernel(), thread_handle, address, tag));
    R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag));
}

/// Unlock a mutex
@@ -28,7 +28,7 @@ Result ArbitrateUnlock(Core::System& system, u64 address) {
    R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
    R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);

    R_RETURN(KConditionVariable::SignalToAddress(system.Kernel(), address));
    R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address));
}

Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) {
@@ -76,7 +76,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 s
} // namespace

Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
    LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size,
    LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
              perm);

    // Validate address / size.
@@ -108,16 +108,10 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
    R_UNLESS((address < address + size), ResultInvalidCurrentMemory);

    // Validate the attribute and mask.
    constexpr u32 SupportedMask =
        static_cast<u32>(MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked);
    constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
    R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
    R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);

    // Check that permission locked is either being set or not masked.
    R_UNLESS((static_cast<Svc::MemoryAttribute>(mask) & Svc::MemoryAttribute::PermissionLocked) ==
             (static_cast<Svc::MemoryAttribute>(attr) & Svc::MemoryAttribute::PermissionLocked),
             ResultInvalidCombination);

    // Validate that the region is in range for the current process.
    auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()};
    R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
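The validation above relies on a bit trick: `(mask | attr) == mask` holds exactly when every bit set in `attr` is also set in `mask`, i.e. `attr` is a subset of `mask`; the follow-up `(mask | attr | SupportedMask) == SupportedMask` likewise confirms both fit inside the supported set. A tiny standalone check of that property:

```cpp
#include <cstdint>
#include <iostream>

// True when every bit of `subset` is contained in `superset`.
bool IsSubsetOf(uint32_t subset, uint32_t superset) {
    return (superset | subset) == superset;
}

int main() {
    constexpr uint32_t Uncached = 1u << 3;                // MemoryAttribute::Uncached
    std::cout << IsSubsetOf(Uncached, Uncached) << '\n';  // 1: attr within mask
    std::cout << IsSubsetOf(1u << 4, Uncached) << '\n';   // 0: unsupported bit rejected
}
```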
@@ -46,7 +46,7 @@ Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
    KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
    auto& page_table{current_process->GetPageTable()};

    if (current_process->GetTotalSystemResourceSize() == 0) {
    if (current_process->GetSystemResourceSize() == 0) {
        LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
        R_THROW(ResultInvalidState);
    }
@@ -95,7 +95,7 @@ Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
    KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
    auto& page_table{current_process->GetPageTable()};

    if (current_process->GetTotalSystemResourceSize() == 0) {
    if (current_process->GetSystemResourceSize() == 0) {
        LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
        R_THROW(ResultInvalidState);
    }

@@ -132,7 +132,7 @@ void SynchronizePreemptionState(Core::System& system) {
        GetCurrentThread(kernel).ClearInterruptFlag();

        // Unpin the current thread.
        cur_process->UnpinCurrentThread();
        cur_process->UnpinCurrentThread(core_id);
    }
}
@@ -85,6 +85,10 @@ Result StartThread(Core::System& system, Handle thread_handle) {
    // Try to start the thread.
    R_TRY(thread->Run());

    // If we succeeded, persist a reference to the thread.
    thread->Open();
    system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());

    R_SUCCEED();
}

@@ -95,6 +99,7 @@ void ExitThread(Core::System& system) {
    auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
    system.GlobalSchedulerContext().RemoveThread(current_thread);
    current_thread->Exit();
    system.Kernel().UnregisterInUseObject(current_thread);
}

/// Sleep the current thread
@@ -255,7 +260,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_

    auto list_iter = thread_list.cbegin();
    for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
        memory.Write64(out_thread_ids, list_iter->GetThreadId());
        memory.Write64(out_thread_ids, (*list_iter)->GetThreadId());
        out_thread_ids += sizeof(u64);
    }

@@ -592,7 +592,7 @@ void Call(Core::System& system, u32 imm) {
    auto& kernel = system.Kernel();
    kernel.EnterSVCProfile();

    if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
    if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
        Call64(system, imm);
    } else {
        Call32(system, imm);
@@ -46,7 +46,6 @@ enum class MemoryAttribute : u32 {
    IpcLocked = (1 << 1),
    DeviceShared = (1 << 2),
    Uncached = (1 << 3),
    PermissionLocked = (1 << 4),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);

@@ -604,57 +603,13 @@ enum class ProcessActivity : u32 {
    Paused,
};

enum class CreateProcessFlag : u32 {
    // Is 64 bit?
    Is64Bit = (1 << 0),

    // What kind of address space?
    AddressSpaceShift = 1,
    AddressSpaceMask = (7 << AddressSpaceShift),
    AddressSpace32Bit = (0 << AddressSpaceShift),
    AddressSpace64BitDeprecated = (1 << AddressSpaceShift),
    AddressSpace32BitWithoutAlias = (2 << AddressSpaceShift),
    AddressSpace64Bit = (3 << AddressSpaceShift),

    // Should JIT debug be done on crash?
    EnableDebug = (1 << 4),

    // Should ASLR be enabled for the process?
    EnableAslr = (1 << 5),

    // Is the process an application?
    IsApplication = (1 << 6),

    // 4.x deprecated: Should use secure memory?
    DeprecatedUseSecureMemory = (1 << 7),

    // 5.x+ Pool partition type.
    PoolPartitionShift = 7,
    PoolPartitionMask = (0xF << PoolPartitionShift),
    PoolPartitionApplication = (0 << PoolPartitionShift),
    PoolPartitionApplet = (1 << PoolPartitionShift),
    PoolPartitionSystem = (2 << PoolPartitionShift),
    PoolPartitionSystemNonSecure = (3 << PoolPartitionShift),

    // 7.x+ Should memory allocation be optimized? This requires IsApplication.
    OptimizeMemoryAllocation = (1 << 11),

    // 11.x+ DisableDeviceAddressSpaceMerge.
    DisableDeviceAddressSpaceMerge = (1 << 12),

    // Mask of all flags.
    All = Is64Bit | AddressSpaceMask | EnableDebug | EnableAslr | IsApplication |
          PoolPartitionMask | OptimizeMemoryAllocation | DisableDeviceAddressSpaceMerge,
};
DECLARE_ENUM_FLAG_OPERATORS(CreateProcessFlag);
|
||||
|
||||
struct CreateProcessParameter {
|
||||
std::array<char, 12> name;
|
||||
u32 version;
|
||||
u64 program_id;
|
||||
u64 code_address;
|
||||
s32 code_num_pages;
|
||||
CreateProcessFlag flags;
|
||||
u32 flags;
|
||||
Handle reslimit;
|
||||
s32 system_resource_num_pages;
|
||||
};
|
||||
|
||||
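CreateProcessFlag packs single-bit options and multi-bit fields (the address space in bits 1-3, the pool partition in bits 7-10, deliberately overlapping the deprecated secure-memory bit) into one u32, with DECLARE_ENUM_FLAG_OPERATORS supplying the bitwise operators. A hedged illustration of composing a value and extracting a field, assuming those generated operators are constexpr as in yuzu's common headers; the helper names are invented for the example:

    // Illustration only: values and shifts mirror the enum above.
    constexpr CreateProcessFlag MakeApplicationFlags() {
        return CreateProcessFlag::Is64Bit | CreateProcessFlag::AddressSpace64Bit |
               CreateProcessFlag::EnableAslr | CreateProcessFlag::IsApplication |
               CreateProcessFlag::PoolPartitionApplication;
    }
    constexpr u32 GetPoolPartition(CreateProcessFlag flags) {
        // The field value 0..3 occupies bits 7..10 (PoolPartitionShift == 7).
        return static_cast<u32>(flags & CreateProcessFlag::PoolPartitionMask) >> 7;
    }
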
@@ -49,7 +49,7 @@ public:
: ServiceFramework{system_, "IManagerForSystemService"} {
// clang-format off
static const FunctionInfo functions[] = {
{0, &IManagerForSystemService::CheckAvailability, "CheckAvailability"},
{0, nullptr, "CheckAvailability"},
{1, nullptr, "GetAccountId"},
{2, nullptr, "EnsureIdTokenCacheAsync"},
{3, nullptr, "LoadIdTokenCache"},
@@ -78,13 +78,6 @@ public:

RegisterHandlers(functions);
}

private:
void CheckAvailability(HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
};

// 3.0.0+
@@ -407,13 +400,13 @@ protected:
IPC::RequestParser rp{ctx};
const auto base = rp.PopRaw<ProfileBase>();

const auto image_data = ctx.ReadBufferA(0);
const auto user_data = ctx.ReadBufferX(0);
const auto user_data = ctx.ReadBuffer();
const auto image_data = ctx.ReadBuffer(1);

LOG_INFO(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
Common::StringFromFixedZeroTerminatedBuffer(
reinterpret_cast<const char*>(base.username.data()), base.username.size()),
base.timestamp, base.user_uuid.RawString());
LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
Common::StringFromFixedZeroTerminatedBuffer(
reinterpret_cast<const char*>(base.username.data()), base.username.size()),
base.timestamp, base.user_uuid.RawString());

if (user_data.size() < sizeof(UserData)) {
LOG_ERROR(Service_ACC, "UserData buffer too small!");
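
The hunk above swaps descriptor-specific reads for the index-based ReadBuffer, whose selection logic appears later in this diff: it probes the A and X descriptor lists at the given index and takes whichever is populated. Roughly, at the call site, with behavior as shown elsewhere in this diff:

    // Index-based form: the context picks descriptor A or X per index.
    const auto user_data = ctx.ReadBuffer();    // index 0, first input buffer
    const auto image_data = ctx.ReadBuffer(1);  // index 1, second input buffer
    // Descriptor-specific form from the other side of the hunk:
    const auto image_data_a = ctx.ReadBufferA(0); // only consults descriptor A
    const auto user_data_x = ctx.ReadBufferX(0);  // only consults descriptor X
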
@@ -844,29 +837,6 @@ void Module::Interface::InitializeApplicationInfoV2(HLERequestContext& ctx) {
rb.Push(ResultSuccess);
}

void Module::Interface::BeginUserRegistration(HLERequestContext& ctx) {
const auto user_id = Common::UUID::MakeRandom();
profile_manager->CreateNewUser(user_id, "yuzu");

LOG_INFO(Service_ACC, "called, uuid={}", user_id.FormattedString());

IPC::ResponseBuilder rb{ctx, 6};
rb.Push(ResultSuccess);
rb.PushRaw(user_id);
}

void Module::Interface::CompleteUserRegistration(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
Common::UUID user_id = rp.PopRaw<Common::UUID>();

LOG_INFO(Service_ACC, "called, uuid={}", user_id.FormattedString());

profile_manager->WriteUserSaveFile();

IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}

void Module::Interface::GetProfileEditor(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
Common::UUID user_id = rp.PopRaw<Common::UUID>();
@@ -910,17 +880,6 @@ void Module::Interface::StoreSaveDataThumbnailApplication(HLERequestContext& ctx
StoreSaveDataThumbnail(ctx, uuid, tid);
}

void Module::Interface::GetBaasAccountManagerForSystemService(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto uuid = rp.PopRaw<Common::UUID>();

LOG_INFO(Service_ACC, "called, uuid=0x{}", uuid.RawString());

IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
rb.PushIpcInterface<IManagerForSystemService>(system, uuid);
}

void Module::Interface::StoreSaveDataThumbnailSystem(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto uuid = rp.PopRaw<Common::UUID>();

@@ -33,13 +33,10 @@ public:
void TrySelectUserWithoutInteraction(HLERequestContext& ctx);
void IsUserAccountSwitchLocked(HLERequestContext& ctx);
void InitializeApplicationInfoV2(HLERequestContext& ctx);
void BeginUserRegistration(HLERequestContext& ctx);
void CompleteUserRegistration(HLERequestContext& ctx);
void GetProfileEditor(HLERequestContext& ctx);
void ListQualifiedUsers(HLERequestContext& ctx);
void ListOpenContextStoredUsers(HLERequestContext& ctx);
void StoreSaveDataThumbnailApplication(HLERequestContext& ctx);
void GetBaasAccountManagerForSystemService(HLERequestContext& ctx);
void StoreSaveDataThumbnailSystem(HLERequestContext& ctx);

private:

@@ -23,7 +23,7 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module_, std::shared_ptr<ProfileManager>
{99, nullptr, "DebugActivateOpenContextRetention"},
{100, nullptr, "GetUserRegistrationNotifier"},
{101, nullptr, "GetUserStateChangeNotifier"},
{102, &ACC_SU::GetBaasAccountManagerForSystemService, "GetBaasAccountManagerForSystemService"},
{102, nullptr, "GetBaasAccountManagerForSystemService"},
{103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
{104, nullptr, "GetProfileUpdateNotifier"},
{105, nullptr, "CheckNetworkServiceAvailabilityAsync"},
@@ -40,8 +40,8 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module_, std::shared_ptr<ProfileManager>
{152, nullptr, "LoadSignedDeviceIdentifierCacheForNintendoAccount"},
{190, nullptr, "GetUserLastOpenedApplication"},
{191, nullptr, "ActivateOpenContextHolder"},
{200, &ACC_SU::BeginUserRegistration, "BeginUserRegistration"},
{201, &ACC_SU::CompleteUserRegistration, "CompleteUserRegistration"},
{200, nullptr, "BeginUserRegistration"},
{201, nullptr, "CompleteUserRegistration"},
{202, nullptr, "CancelUserRegistration"},
{203, nullptr, "DeleteUser"},
{204, nullptr, "SetUserPosition"},

@@ -96,10 +96,9 @@ public:
bool SetProfileBaseAndData(Common::UUID uuid, const ProfileBase& profile_new,
const UserData& data_new);

void WriteUserSaveFile();

private:
void ParseUserSaveFile();
void WriteUserSaveFile();
std::optional<std::size_t> AddToProfiles(const ProfileInfo& profile);
bool RemoveProfileAtIndex(std::size_t index);

@@ -210,8 +210,8 @@ IDisplayController::IDisplayController(Core::System& system_)
{21, nullptr, "ClearAppletTransitionBuffer"},
{22, nullptr, "AcquireLastApplicationCaptureSharedBuffer"},
{23, nullptr, "ReleaseLastApplicationCaptureSharedBuffer"},
{24, &IDisplayController::AcquireLastForegroundCaptureSharedBuffer, "AcquireLastForegroundCaptureSharedBuffer"},
{25, &IDisplayController::ReleaseLastForegroundCaptureSharedBuffer, "ReleaseLastForegroundCaptureSharedBuffer"},
{24, nullptr, "AcquireLastForegroundCaptureSharedBuffer"},
{25, nullptr, "ReleaseLastForegroundCaptureSharedBuffer"},
{26, &IDisplayController::AcquireCallerAppletCaptureSharedBuffer, "AcquireCallerAppletCaptureSharedBuffer"},
{27, &IDisplayController::ReleaseCallerAppletCaptureSharedBuffer, "ReleaseCallerAppletCaptureSharedBuffer"},
{28, nullptr, "TakeScreenShotOfOwnLayerEx"},
@@ -239,22 +239,6 @@ void IDisplayController::TakeScreenShotOfOwnLayer(HLERequestContext& ctx) {
rb.Push(ResultSuccess);
}

void IDisplayController::AcquireLastForegroundCaptureSharedBuffer(HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");

IPC::ResponseBuilder rb{ctx, 4};
rb.Push(ResultSuccess);
rb.Push(1U);
rb.Push(0);
}

void IDisplayController::ReleaseLastForegroundCaptureSharedBuffer(HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");

IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}

void IDisplayController::AcquireCallerAppletCaptureSharedBuffer(HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");

@@ -796,9 +780,7 @@ ILockAccessor::ILockAccessor(Core::System& system_)
lock_event = service_context.CreateEvent("ILockAccessor::LockEvent");
}

ILockAccessor::~ILockAccessor() {
service_context.CloseEvent(lock_event);
};
ILockAccessor::~ILockAccessor() = default;

void ILockAccessor::TryLock(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
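
This is the first of several hunks with the same shape (ICommonStateGetter, Cabinet, Controller_Palma, and HidbusBase follow below): a defaulted destructor is traded for one that closes the event created in the constructor. With ServiceContext-managed events, '= default' skips the CloseEvent call and leaks the kernel object. A minimal sketch of the create/close pairing, with a hypothetical class name:

    // Hedged sketch of the pattern; CreateEvent/CloseEvent are the yuzu
    // ServiceContext helpers used in the hunks above and below.
    class EventHolder {
    public:
        explicit EventHolder(KernelHelpers::ServiceContext& ctx) : service_context{ctx} {
            event = service_context.CreateEvent("EventHolder:Event");
        }
        ~EventHolder() {
            service_context.CloseEvent(event); // a defaulted destructor would omit this
        }
    private:
        KernelHelpers::ServiceContext& service_context;
        Kernel::KEvent* event{};
    };
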
@@ -911,9 +893,7 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system_,
msg_queue->PushMessage(AppletMessageQueue::AppletMessage::ChangeIntoForeground);
}

ICommonStateGetter::~ICommonStateGetter() {
service_context.CloseEvent(sleep_lock_event);
};
ICommonStateGetter::~ICommonStateGetter() = default;

void ICommonStateGetter::GetBootMode(HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called");
@@ -1577,7 +1557,7 @@ ILibraryAppletSelfAccessor::ILibraryAppletSelfAccessor(Core::System& system_)
{100, nullptr, "CreateGameMovieTrimmer"},
{101, nullptr, "ReserveResourceForMovieOperation"},
{102, nullptr, "UnreserveResourceForMovieOperation"},
{110, &ILibraryAppletSelfAccessor::GetMainAppletAvailableUsers, "GetMainAppletAvailableUsers"},
{110, nullptr, "GetMainAppletAvailableUsers"},
{120, nullptr, "GetLaunchStorageInfoForDebug"},
{130, nullptr, "GetGpuErrorDetectedSystemEvent"},
{140, nullptr, "SetApplicationMemoryReservation"},
@@ -1672,25 +1652,6 @@ void ILibraryAppletSelfAccessor::GetCallerAppletIdentityInfo(HLERequestContext&
rb.PushRaw(applet_info);
}

void ILibraryAppletSelfAccessor::GetMainAppletAvailableUsers(HLERequestContext& ctx) {
const Service::Account::ProfileManager manager{};
bool is_empty{true};
s32 user_count{-1};

LOG_INFO(Service_AM, "called");

if (manager.GetUserCount() > 0) {
is_empty = false;
user_count = static_cast<s32>(manager.GetUserCount());
ctx.WriteBuffer(manager.GetAllUsers());
}

IPC::ResponseBuilder rb{ctx, 4};
rb.Push(ResultSuccess);
rb.Push<u8>(is_empty);
rb.Push(user_count);
}

void ILibraryAppletSelfAccessor::PushInShowAlbum() {
const Applets::CommonArguments arguments{
.arguments_version = Applets::CommonArgumentVersion::Version3,

@@ -124,8 +124,6 @@ public:
private:
void GetCallerAppletCaptureImageEx(HLERequestContext& ctx);
void TakeScreenShotOfOwnLayer(HLERequestContext& ctx);
void AcquireLastForegroundCaptureSharedBuffer(HLERequestContext& ctx);
void ReleaseLastForegroundCaptureSharedBuffer(HLERequestContext& ctx);
void AcquireCallerAppletCaptureSharedBuffer(HLERequestContext& ctx);
void ReleaseCallerAppletCaptureSharedBuffer(HLERequestContext& ctx);
};
@@ -347,7 +345,6 @@ private:
void GetLibraryAppletInfo(HLERequestContext& ctx);
void ExitProcessAndReturn(HLERequestContext& ctx);
void GetCallerAppletIdentityInfo(HLERequestContext& ctx);
void GetMainAppletAvailableUsers(HLERequestContext& ctx);

void PushInShowAlbum();
void PushInShowCabinetData();

@@ -25,9 +25,7 @@ Cabinet::Cabinet(Core::System& system_, LibraryAppletMode applet_mode_,
service_context.CreateEvent("CabinetApplet:AvailabilityChangeEvent");
}

Cabinet::~Cabinet() {
service_context.CloseEvent(availability_change_event);
};
Cabinet::~Cabinet() = default;

void Cabinet::Initialize() {
Applet::Initialize();

@@ -25,12 +25,11 @@ void LoopProcess(Core::System& system) {
server_manager->RegisterNamedService(
"caps:u", std::make_shared<IAlbumApplicationService>(system, album_manager));

server_manager->RegisterNamedService(
"caps:ss", std::make_shared<IScreenShotService>(system, album_manager));
server_manager->RegisterNamedService("caps:ss", std::make_shared<IScreenShotService>(system));
server_manager->RegisterNamedService("caps:sc",
std::make_shared<IScreenShotControlService>(system));
server_manager->RegisterNamedService(
"caps:su", std::make_shared<IScreenShotApplicationService>(system, album_manager));
server_manager->RegisterNamedService("caps:su",
std::make_shared<IScreenShotApplicationService>(system));

ServerManager::RunServer(std::move(server_manager));
}

@@ -2,11 +2,12 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include <sstream>
#include <stb_image.h>
#include <stb_image_resize.h>

#include "common/fs/file.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "common/stb.h"
#include "core/core.h"
#include "core/hle/service/caps/caps_manager.h"
#include "core/hle/service/caps/caps_result.h"
@@ -226,49 +227,6 @@ Result AlbumManager::LoadAlbumScreenShotThumbnail(
+static_cast<int>(out_image_output.height), decoder_options.flags);
}

Result AlbumManager::SaveScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute,
std::span<const u8> image_data, u64 aruid) {
return SaveScreenShot(out_entry, attribute, {}, image_data, aruid);
}

Result AlbumManager::SaveScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute,
const ApplicationData& app_data, std::span<const u8> image_data,
u64 aruid) {
const u64 title_id = system.GetApplicationProcessProgramID();
const auto& user_clock = system.GetTimeManager().GetStandardUserSystemClockCore();

s64 posix_time{};
Result result = user_clock.GetCurrentTime(system, posix_time);

if (result.IsError()) {
return result;
}

const auto date = ConvertToAlbumDateTime(posix_time);

return SaveImage(out_entry, image_data, title_id, date);
}

Result AlbumManager::SaveEditedScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute,
const AlbumFileId& file_id,
std::span<const u8> image_data) {
const auto& user_clock = system.GetTimeManager().GetStandardUserSystemClockCore();

s64 posix_time{};
Result result = user_clock.GetCurrentTime(system, posix_time);

if (result.IsError()) {
return result;
}

const auto date = ConvertToAlbumDateTime(posix_time);

return SaveImage(out_entry, image_data, file_id.application_id, date);
}

Result AlbumManager::GetFile(std::filesystem::path& out_path, const AlbumFileId& file_id) const {
const auto file = album_files.find(file_id);

@@ -407,47 +365,6 @@ Result AlbumManager::LoadImage(std::span<u8> out_image, const std::filesystem::p
return ResultSuccess;
}

static void PNGToMemory(void* context, void* png, int len) {
std::vector<u8>* png_image = static_cast<std::vector<u8>*>(context);
png_image->reserve(len);
std::memcpy(png_image->data(), png, len);
}

Result AlbumManager::SaveImage(ApplicationAlbumEntry& out_entry, std::span<const u8> image,
u64 title_id, const AlbumFileDateTime& date) const {
const auto screenshot_path =
Common::FS::GetYuzuPathString(Common::FS::YuzuPath::ScreenshotsDir);
const std::string formatted_date =
fmt::format("{:04}-{:02}-{:02}_{:02}-{:02}-{:02}-{:03}", date.year, date.month, date.day,
date.hour, date.minute, date.second, 0);
const std::string file_path =
fmt::format("{}/{:016x}_{}.png", screenshot_path, title_id, formatted_date);

const Common::FS::IOFile db_file{file_path, Common::FS::FileAccessMode::Write,
Common::FS::FileType::BinaryFile};

std::vector<u8> png_image;
if (!stbi_write_png_to_func(PNGToMemory, &png_image, 1280, 720, STBI_rgb_alpha, image.data(),
0)) {
return ResultFileCountLimit;
}

if (db_file.Write(png_image) != png_image.size()) {
return ResultFileCountLimit;
}

out_entry = {
.size = png_image.size(),
.hash = {},
.datetime = date,
.storage = AlbumStorage::Sd,
.content = ContentType::Screenshot,
.unknown = 1,
};

return ResultSuccess;
}

AlbumFileDateTime AlbumManager::ConvertToAlbumDateTime(u64 posix_time) const {
Time::TimeZone::CalendarInfo calendar_date{};
const auto& time_zone_manager =

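One detail worth flagging in the removed SaveImage path: PNGToMemory reserves len bytes but memcpys into data() without growing the vector, so png_image.size() stays 0 and the later db_file.Write size check is trivially satisfied while nothing is written. A corrected sketch of the same stbi_write_png_to_func callback, appending because stb may invoke the callback more than once per image:

    // Corrected sketch: insert() grows size() as well as capacity, unlike
    // reserve()+memcpy, and appending tolerates repeated callback invocations.
    static void PNGToMemory(void* context, void* png, int len) {
        auto* png_image = static_cast<std::vector<u8>*>(context);
        const auto* bytes = static_cast<const u8*>(png);
        png_image->insert(png_image->end(), bytes, bytes + len);
    }
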
@@ -58,15 +58,6 @@ public:
std::vector<u8>& out_image, const AlbumFileId& file_id,
const ScreenShotDecodeOption& decoder_options) const;

Result SaveScreenShot(ApplicationAlbumEntry& out_entry, const ScreenShotAttribute& attribute,
std::span<const u8> image_data, u64 aruid);
Result SaveScreenShot(ApplicationAlbumEntry& out_entry, const ScreenShotAttribute& attribute,
const ApplicationData& app_data, std::span<const u8> image_data,
u64 aruid);
Result SaveEditedScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute, const AlbumFileId& file_id,
std::span<const u8> image_data);

private:
static constexpr std::size_t NandAlbumFileLimit = 1000;
static constexpr std::size_t SdAlbumFileLimit = 10000;
@@ -76,8 +67,6 @@ private:
Result GetAlbumEntry(AlbumEntry& out_entry, const std::filesystem::path& path) const;
Result LoadImage(std::span<u8> out_image, const std::filesystem::path& path, int width,
int height, ScreenShotDecoderFlag flag) const;
Result SaveImage(ApplicationAlbumEntry& out_entry, std::span<const u8> image, u64 title_id,
const AlbumFileDateTime& date) const;

AlbumFileDateTime ConvertToAlbumDateTime(u64 posix_time) const;

@@ -1,25 +1,19 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/logging/log.h"
#include "core/hle/service/caps/caps_manager.h"
#include "core/hle/service/caps/caps_types.h"
#include "core/hle/service/ipc_helpers.h"

#include "core/hle/service/caps/caps_ss.h"

namespace Service::Capture {

IScreenShotService::IScreenShotService(Core::System& system_,
std::shared_ptr<AlbumManager> album_manager)
: ServiceFramework{system_, "caps:ss"}, manager{album_manager} {
IScreenShotService::IScreenShotService(Core::System& system_)
: ServiceFramework{system_, "caps:ss"} {
// clang-format off
static const FunctionInfo functions[] = {
{201, nullptr, "SaveScreenShot"},
{202, nullptr, "SaveEditedScreenShot"},
{203, &IScreenShotService::SaveScreenShotEx0, "SaveScreenShotEx0"},
{203, nullptr, "SaveScreenShotEx0"},
{204, nullptr, "SaveEditedScreenShotEx0"},
{206, &IScreenShotService::SaveEditedScreenShotEx1, "SaveEditedScreenShotEx1"},
{206, nullptr, "Unknown206"},
{208, nullptr, "SaveScreenShotOfMovieEx1"},
{1000, nullptr, "Unknown1000"},
};
@@ -30,65 +24,4 @@ IScreenShotService::IScreenShotService(Core::System& system_,

IScreenShotService::~IScreenShotService() = default;

void IScreenShotService::SaveScreenShotEx0(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute{};
u32 report_option{};
INSERT_PADDING_BYTES(0x4);
u64 applet_resource_user_id{};
};
static_assert(sizeof(Parameters) == 0x50, "Parameters has incorrect size.");

const auto parameters{rp.PopRaw<Parameters>()};
const auto image_data_buffer = ctx.ReadBuffer();

LOG_INFO(Service_Capture,
"called, report_option={}, image_data_buffer_size={}, applet_resource_user_id={}",
parameters.report_option, image_data_buffer.size(),
parameters.applet_resource_user_id);

ApplicationAlbumEntry entry{};
const auto result = manager->SaveScreenShot(entry, parameters.attribute, image_data_buffer,
parameters.applet_resource_user_id);

IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}
void IScreenShotService::SaveEditedScreenShotEx1(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute;
u64 width;
u64 height;
u64 thumbnail_width;
u64 thumbnail_height;
AlbumFileId file_id;
};
static_assert(sizeof(Parameters) == 0x78, "Parameters has incorrect size.");

const auto parameters{rp.PopRaw<Parameters>()};
const auto application_data_buffer = ctx.ReadBuffer(0);
const auto image_data_buffer = ctx.ReadBuffer(1);
const auto thumbnail_image_data_buffer = ctx.ReadBuffer(2);

LOG_INFO(Service_Capture,
"called, width={}, height={}, thumbnail_width={}, thumbnail_height={}, "
"application_id={:016x}, storage={}, type={}, app_data_buffer_size={}, "
"image_data_buffer_size={}, thumbnail_image_buffer_size={}",
parameters.width, parameters.height, parameters.thumbnail_width,
parameters.thumbnail_height, parameters.file_id.application_id,
parameters.file_id.storage, parameters.file_id.type, application_data_buffer.size(),
image_data_buffer.size(), thumbnail_image_data_buffer.size());

ApplicationAlbumEntry entry{};
const auto result = manager->SaveEditedScreenShot(entry, parameters.attribute,
parameters.file_id, image_data_buffer);

IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}

} // namespace Service::Capture

@@ -13,14 +13,8 @@ namespace Service::Capture {

class IScreenShotService final : public ServiceFramework<IScreenShotService> {
public:
explicit IScreenShotService(Core::System& system_, std::shared_ptr<AlbumManager> album_manager);
explicit IScreenShotService(Core::System& system_);
~IScreenShotService() override;

private:
void SaveScreenShotEx0(HLERequestContext& ctx);
void SaveEditedScreenShotEx1(HLERequestContext& ctx);

std::shared_ptr<AlbumManager> manager;
};

} // namespace Service::Capture

@@ -2,22 +2,19 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/logging/log.h"
#include "core/hle/service/caps/caps_manager.h"
#include "core/hle/service/caps/caps_su.h"
#include "core/hle/service/caps/caps_types.h"
#include "core/hle/service/ipc_helpers.h"

namespace Service::Capture {

IScreenShotApplicationService::IScreenShotApplicationService(
Core::System& system_, std::shared_ptr<AlbumManager> album_manager)
: ServiceFramework{system_, "caps:su"}, manager{album_manager} {
IScreenShotApplicationService::IScreenShotApplicationService(Core::System& system_)
: ServiceFramework{system_, "caps:su"} {
// clang-format off
static const FunctionInfo functions[] = {
{32, &IScreenShotApplicationService::SetShimLibraryVersion, "SetShimLibraryVersion"},
{201, nullptr, "SaveScreenShot"},
{203, &IScreenShotApplicationService::SaveScreenShotEx0, "SaveScreenShotEx0"},
{205, &IScreenShotApplicationService::SaveScreenShotEx1, "SaveScreenShotEx1"},
{203, nullptr, "SaveScreenShotEx0"},
{205, nullptr, "SaveScreenShotEx1"},
{210, nullptr, "SaveScreenShotEx2"},
};
// clang-format on
@@ -39,62 +36,4 @@ void IScreenShotApplicationService::SetShimLibraryVersion(HLERequestContext& ctx
rb.Push(ResultSuccess);
}

void IScreenShotApplicationService::SaveScreenShotEx0(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute{};
AlbumReportOption report_option{};
INSERT_PADDING_BYTES(0x4);
u64 applet_resource_user_id{};
};
static_assert(sizeof(Parameters) == 0x50, "Parameters has incorrect size.");

const auto parameters{rp.PopRaw<Parameters>()};
const auto image_data_buffer = ctx.ReadBuffer();

LOG_INFO(Service_Capture,
"called, report_option={}, image_data_buffer_size={}, applet_resource_user_id={}",
parameters.report_option, image_data_buffer.size(),
parameters.applet_resource_user_id);

ApplicationAlbumEntry entry{};
const auto result = manager->SaveScreenShot(entry, parameters.attribute, image_data_buffer,
parameters.applet_resource_user_id);

IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}

void IScreenShotApplicationService::SaveScreenShotEx1(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute{};
AlbumReportOption report_option{};
INSERT_PADDING_BYTES(0x4);
u64 applet_resource_user_id{};
};
static_assert(sizeof(Parameters) == 0x50, "Parameters has incorrect size.");

const auto parameters{rp.PopRaw<Parameters>()};
const auto app_data_buffer = ctx.ReadBuffer(0);
const auto image_data_buffer = ctx.ReadBuffer(1);

LOG_INFO(Service_Capture,
"called, report_option={}, image_data_buffer_size={}, applet_resource_user_id={}",
parameters.report_option, image_data_buffer.size(),
parameters.applet_resource_user_id);

ApplicationAlbumEntry entry{};
ApplicationData app_data{};
std::memcpy(&app_data, app_data_buffer.data(), sizeof(ApplicationData));
const auto result =
manager->SaveScreenShot(entry, parameters.attribute, app_data, image_data_buffer,
parameters.applet_resource_user_id);

IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}

} // namespace Service::Capture

@@ -10,20 +10,14 @@ class System;
}

namespace Service::Capture {
class AlbumManager;

class IScreenShotApplicationService final : public ServiceFramework<IScreenShotApplicationService> {
public:
explicit IScreenShotApplicationService(Core::System& system_,
std::shared_ptr<AlbumManager> album_manager);
explicit IScreenShotApplicationService(Core::System& system_);
~IScreenShotApplicationService() override;

private:
void SetShimLibraryVersion(HLERequestContext& ctx);
void SaveScreenShotEx0(HLERequestContext& ctx);
void SaveScreenShotEx1(HLERequestContext& ctx);

std::shared_ptr<AlbumManager> manager;
};

} // namespace Service::Capture

@@ -20,8 +20,6 @@ enum class AlbumImageOrientation {
enum class AlbumReportOption : s32 {
Disable,
Enable,
Unknown2,
Unknown3,
};

enum class ContentType : u8 {

@@ -19,9 +19,7 @@ Controller_Palma::Controller_Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared
operation_complete_event = service_context.CreateEvent("hid:PalmaOperationCompleteEvent");
}

Controller_Palma::~Controller_Palma() {
service_context.CloseEvent(operation_complete_event);
};
Controller_Palma::~Controller_Palma() = default;

void Controller_Palma::OnInit() {}

@@ -2757,10 +2757,6 @@ public:
joy_detach_event = service_context.CreateEvent("HidSys::JoyDetachEvent");
}

~HidSys() {
service_context.CloseEvent(joy_detach_event);
};

private:
void ApplyNpadSystemCommonPolicy(HLERequestContext& ctx) {
LOG_WARNING(Service_HID, "called");

@@ -13,10 +13,7 @@ HidbusBase::HidbusBase(Core::System& system_, KernelHelpers::ServiceContext& ser
: system(system_), service_context(service_context_) {
send_command_async_event = service_context.CreateEvent("hidbus:SendCommandAsyncEvent");
}

HidbusBase::~HidbusBase() {
service_context.CloseEvent(send_command_async_event);
};
HidbusBase::~HidbusBase() = default;

void HidbusBase::ActivateDevice() {
if (is_activated) {

@@ -23,19 +23,6 @@
#include "core/hle/service/ipc_helpers.h"
#include "core/memory.h"

namespace {
static thread_local std::array read_buffer_data_a{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
};
static thread_local std::array read_buffer_data_x{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
};
} // Anonymous namespace

namespace Service {

SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_)
@@ -341,61 +328,26 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
}
}

std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
static thread_local std::array read_buffer_a{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};

ASSERT_OR_EXECUTE_MSG(
BufferDescriptorA().size() > buffer_index, { return {}; },
"BufferDescriptorA invalid buffer_index {}", buffer_index);
auto& read_buffer = read_buffer_a[buffer_index];
return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
BufferDescriptorA()[buffer_index].Size(),
&read_buffer_data_a[buffer_index]);
}

std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
static thread_local std::array read_buffer_x{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};

ASSERT_OR_EXECUTE_MSG(
BufferDescriptorX().size() > buffer_index, { return {}; },
"BufferDescriptorX invalid buffer_index {}", buffer_index);
auto& read_buffer = read_buffer_x[buffer_index];
return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
BufferDescriptorX()[buffer_index].Size(),
&read_buffer_data_x[buffer_index]);
}

std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
static thread_local std::array read_buffer_a{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
static thread_local std::array read_buffer_data_a{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
};
static thread_local std::array read_buffer_x{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
static thread_local std::array read_buffer_data_x{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
};

const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
const bool is_buffer_x{BufferDescriptorX().size() > buffer_index &&
BufferDescriptorX()[buffer_index].Size()};

if (is_buffer_a && is_buffer_x) {
LOG_WARNING(Input, "Both buffer descriptors are available a.size={}, x.size={}",
BufferDescriptorA()[buffer_index].Size(),
BufferDescriptorX()[buffer_index].Size());
}

if (is_buffer_a) {
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorA().size() > buffer_index, { return {}; },

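Both ReadBuffer variants lean on the same trick: function-local thread_local arrays of guest-memory wrappers and scratch buffers, one slot per buffer index, so repeated IPC reads on a service thread reuse their allocations without any locking. A generic, self-contained sketch of the pattern, with std::vector standing in for Common::ScratchBuffer:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <span>
    #include <vector>

    std::span<const std::uint8_t> ReadIntoScratch(const std::uint8_t* src,
                                                  std::size_t size, std::size_t slot) {
        // One backing store per slot and per thread; capacity persists across
        // calls, so steady-state reads allocate nothing.
        static thread_local std::array<std::vector<std::uint8_t>, 3> scratch{};
        auto& buffer = scratch[slot];
        buffer.assign(src, src + size);
        return buffer;
    }
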
@@ -253,12 +253,6 @@ public:
return domain_message_header.has_value();
}

/// Helper function to get a span of a buffer using the buffer descriptor A
[[nodiscard]] std::span<const u8> ReadBufferA(std::size_t buffer_index = 0) const;

/// Helper function to get a span of a buffer using the buffer descriptor X
[[nodiscard]] std::span<const u8> ReadBufferX(std::size_t buffer_index = 0) const;

/// Helper function to get a span of a buffer using the appropriate buffer descriptor
[[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const;

@@ -21,8 +21,10 @@ ServiceContext::ServiceContext(Core::System& system_, std::string name_)

// Create the process.
process = Kernel::KProcess::Create(kernel);
ASSERT(R_SUCCEEDED(process->Initialize(Kernel::Svc::CreateProcessParameter{},
kernel.GetSystemResourceLimit(), false)));
ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
Kernel::KProcess::ProcessType::KernelInternal,
kernel.GetSystemResourceLimit())
.IsSuccess());

// Register the process.
Kernel::KProcess::Register(kernel, process);

@@ -41,7 +41,6 @@ void CoreData::BuildRandom(Age age, Gender gender, Race race) {
}
}

SetDefault();
SetGender(gender);
SetFavoriteColor(MiiUtil::GetRandomValue(FavoriteColor::Max));
SetRegionMove(0);

@@ -41,7 +41,7 @@ bool BufferQueueCore::WaitForDequeueCondition(std::unique_lock<std::mutex>& lk)
s32 BufferQueueCore::GetMinUndequeuedBufferCountLocked(bool async) const {
// If DequeueBuffer is allowed to error out, we don't have to add an extra buffer.
if (!use_async_buffer) {
return 0;
return max_acquired_buffer_count;
}

if (dequeue_buffer_cannot_block || async) {
@@ -52,7 +52,7 @@ s32 BufferQueueCore::GetMinUndequeuedBufferCountLocked(bool async) const {
}

s32 BufferQueueCore::GetMinMaxBufferCountLocked(bool async) const {
return GetMinUndequeuedBufferCountLocked(async);
return GetMinUndequeuedBufferCountLocked(async) + 1;
}

s32 BufferQueueCore::GetMaxBufferCountLocked(bool async) const {
@@ -61,7 +61,7 @@ s32 BufferQueueCore::GetMaxBufferCountLocked(bool async) const {

if (override_max_buffer_count != 0) {
ASSERT(override_max_buffer_count >= min_buffer_count);
return override_max_buffer_count;
max_buffer_count = override_max_buffer_count;
}

// Any buffers that are dequeued by the producer or sitting in the queue waiting to be consumed

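The three BufferQueueCore hunks each shift the buffer accounting by roughly one slot. Worked through under the assumption that max_acquired_buffer_count is 1 (the value is set elsewhere and not shown here) and use_async_buffer is false:

    // First variant:  GetMinUndequeuedBufferCountLocked -> 0
    //                 GetMinMaxBufferCountLocked        -> 0
    // Second variant: GetMinUndequeuedBufferCountLocked -> max_acquired_buffer_count == 1
    //                 GetMinMaxBufferCountLocked        -> 1 + 1 == 2
    // GetMaxBufferCountLocked likewise stops early-returning the override and
    // instead assigns it to max_buffer_count and falls through.
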
@@ -134,7 +134,7 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
const s32 max_buffer_count = core->GetMaxBufferCountLocked(async);
if (async && core->override_max_buffer_count) {
if (core->override_max_buffer_count < max_buffer_count) {
*found = BufferQueueCore::INVALID_BUFFER_SLOT;
LOG_ERROR(Service_Nvnflinger, "async mode is invalid with buffer count override");
return Status::BadValue;
}
}
@@ -142,8 +142,7 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
// Free up any buffers that are in slots beyond the max buffer count
for (s32 s = max_buffer_count; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
ASSERT(slots[s].buffer_state == BufferState::Free);
if (slots[s].graphic_buffer != nullptr && slots[s].buffer_state == BufferState::Free &&
!slots[s].is_preallocated) {
if (slots[s].graphic_buffer != nullptr) {
core->FreeBufferLocked(s);
*return_flags |= Status::ReleaseAllBuffers;
}

@@ -46,7 +46,7 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
// Get bounds of where mapping is possible.
const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
const auto state = Kernel::KMemoryState::IoMemory;
const auto state = Kernel::KMemoryState::Io;
const auto perm = Kernel::KMemoryPermission::UserReadWrite;
std::mt19937_64 rng{process->GetRandomEntropy(0)};

@@ -66,6 +66,7 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
"ScreenComposition",
[this](std::uintptr_t, s64 time,
std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
{ const auto lock_guard = Lock(); }
vsync_signal.Set();
return std::chrono::nanoseconds(GetNextTicks());
});
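
The one-line addition in the vsync callback, `{ const auto lock_guard = Lock(); }`, is an empty critical section: it acquires the composition lock and releases it immediately, so the signal below can never fire while a composition pass still holds the lock mid-frame. The same barrier shape with a plain mutex, as a hedged stand-in for Nvnflinger::Lock():

    #include <mutex>

    std::mutex composition_mutex;

    void OnVsync() {
        { std::scoped_lock barrier{composition_mutex}; } // wait out any in-flight holder
        // ...then signal outside the lock, so the waiter cannot block on us.
    }
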
@@ -98,7 +99,6 @@ Nvnflinger::~Nvnflinger() {
}

ShutdownLayers();
vsync_thread = {};

if (nvdrv) {
nvdrv->Close(disp_fd);
@@ -106,7 +106,6 @@ Nvnflinger::~Nvnflinger() {
}

void Nvnflinger::ShutdownLayers() {
const auto lock_guard = Lock();
for (auto& display : displays) {
for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) {
display.GetLayer(layer).Core().NotifyShutdown();
@@ -230,6 +229,16 @@ VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) {
return display->FindLayer(layer_id);
}

const VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) const {
const auto* const display = FindDisplay(display_id);

if (display == nullptr) {
return nullptr;
}

return display->FindLayer(layer_id);
}

VI::Layer* Nvnflinger::FindOrCreateLayer(u64 display_id, u64 layer_id) {
auto* const display = FindDisplay(display_id);

@@ -279,6 +288,7 @@ void Nvnflinger::Compose() {
auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
ASSERT(nvdisp);

guard->unlock();
Common::Rectangle<int> crop_rect{
static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};
@@ -289,6 +299,7 @@ void Nvnflinger::Compose() {
buffer.fence.fences, buffer.fence.num_fences);

MicroProfileFlip();
guard->lock();

swap_interval = buffer.swap_interval;

@@ -117,6 +117,9 @@ private:
/// Finds the layer identified by the specified ID in the desired display.
[[nodiscard]] VI::Layer* FindLayer(u64 display_id, u64 layer_id);

/// Finds the layer identified by the specified ID in the desired display.
[[nodiscard]] const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;

/// Finds the layer identified by the specified ID in the desired display,
/// or creates the layer if it is not found.
/// To be used when the system expects the specified ID to already exist.

@@ -141,12 +141,6 @@ public:
service_context.CreateEvent("IParentalControlService::RequestSuspensionEvent");
}

~IParentalControlService() {
service_context.CloseEvent(synchronization_event);
service_context.CloseEvent(unlinked_event);
service_context.CloseEvent(request_suspension_event);
};

private:
bool CheckFreeCommunicationPermissionImpl() const {
if (states.temporary_unlocked) {

@@ -37,7 +37,7 @@ std::optional<Kernel::KProcess*> SearchProcessList(
void GetApplicationPidGeneric(HLERequestContext& ctx,
const std::vector<Kernel::KProcess*>& process_list) {
const auto process = SearchProcessList(process_list, [](const auto& proc) {
return proc->GetProcessId() == Kernel::KProcess::ProcessIdMin;
return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin;
});

IPC::ResponseBuilder rb{ctx, 4};

@@ -58,8 +58,14 @@ private:
IPC::RequestParser rp{ctx};
const auto process_id = rp.PopRaw<u64>();

const auto data1 = ctx.ReadBufferA(0);
const auto data2 = ctx.ReadBufferX(0);
const auto data1 = ctx.ReadBuffer(0);
const auto data2 = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}

return std::span<const u8>{};
}();

LOG_DEBUG(Service_PREPO,
"called, type={:02X}, process_id={:016X}, data1_size={:016X}, data2_size={:016X}",
@@ -79,8 +85,14 @@ private:
const auto user_id = rp.PopRaw<u128>();
const auto process_id = rp.PopRaw<u64>();

const auto data1 = ctx.ReadBufferA(0);
const auto data2 = ctx.ReadBufferX(0);
const auto data1 = ctx.ReadBuffer(0);
const auto data2 = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}

return std::span<const u8>{};
}();

LOG_DEBUG(Service_PREPO,
"called, type={:02X}, user_id={:016X}{:016X}, process_id={:016X}, "
@@ -125,8 +137,14 @@ private:
IPC::RequestParser rp{ctx};
const auto title_id = rp.PopRaw<u64>();

const auto data1 = ctx.ReadBufferA(0);
const auto data2 = ctx.ReadBufferX(0);
const auto data1 = ctx.ReadBuffer(0);
const auto data2 = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}

return std::span<const u8>{};
}();

LOG_DEBUG(Service_PREPO, "called, title_id={:016X}, data1_size={:016X}, data2_size={:016X}",
title_id, data1.size(), data2.size());
@@ -143,8 +161,14 @@ private:
const auto user_id = rp.PopRaw<u128>();
const auto title_id = rp.PopRaw<u64>();

const auto data1 = ctx.ReadBufferA(0);
const auto data2 = ctx.ReadBufferX(0);
const auto data1 = ctx.ReadBuffer(0);
const auto data2 = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}

return std::span<const u8>{};
}();

LOG_DEBUG(Service_PREPO,
"called, user_id={:016X}{:016X}, title_id={:016X}, data1_size={:016X}, "

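The same replacement appears in all four prepo handlers above: the unconditional ReadBufferX(0) becomes an immediately-invoked lambda that yields an empty span when no second input buffer was supplied. Since the lambda is repeated verbatim, it could be hoisted into a small helper; a hypothetical sketch against the CanReadBuffer/ReadBuffer API shown in this diff:

    // Hedged refactoring sketch, not code from either branch.
    std::span<const u8> ReadBufferOrEmpty(HLERequestContext& ctx, std::size_t index) {
        if (ctx.CanReadBuffer(index)) {
            return ctx.ReadBuffer(index);
        }
        return {};
    }
    // Usage in a handler:
    //   const auto data1 = ctx.ReadBuffer(0);
    //   const auto data2 = ReadBufferOrEmpty(ctx, 1);
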
Some files were not shown because too many files have changed in this diff.