Compare commits
39 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 2c1e2c63c3 |  |
|  | 603952bc27 |  |
|  | 3196d957b0 |  |
|  | 4ef66ec8fb |  |
|  | d41ffb592c |  |
|  | 01bc0c84f0 |  |
|  | 2575a93dc6 |  |
|  | f5c1d7b8c8 |  |
|  | 86ccce3721 |  |
|  | 38e4a144a1 |  |
|  | 9cafb0d912 |  |
|  | 00b09de3d9 |  |
|  | a2d29412cb |  |
|  | 846c994cc9 |  |
|  | 096366ead5 |  |
|  | c78f6d4f20 |  |
|  | c34a95fa25 |  |
|  | b5d6194f6d |  |
|  | a5e419535f |  |
|  | 9775fae4eb |  |
|  | a262dc02b5 |  |
|  | fca5752690 |  |
|  | 7b48e7b363 |  |
|  | a7d9be1384 |  |
|  | abfd690601 |  |
|  | bf7e78795f |  |
|  | a14438d013 |  |
|  | 48737a4bb2 |  |
|  | b321c39371 |  |
|  | 19f475fd70 |  |
|  | 2c56e94702 |  |
|  | 95b844dbae |  |
|  | 9da4e62573 |  |
|  | 1c8f6ba18f |  |
|  | ab0e71d7cb |  |
|  | 24d7aaf43c |  |
|  | c42fde2a37 |  |
|  | fef3d8acb5 |  |
|  | e56410b404 |  |
@@ -1,12 +1,27 @@
#!/bin/bash -ex

set -e

cd /yuzu

ccache -s

mkdir build || true && cd build
cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWCross.cmake" -DUSE_CCACHE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_QT_TRANSLATION=ON
ninja
LDFLAGS="-fuse-ld=lld"
# -femulated-tls required due to an incompatibility between GCC and Clang
# TODO(lat9nq): If this is widespread, we probably need to add this to CMakeLists where appropriate
cmake .. \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_CXX_FLAGS="-femulated-tls" \
    -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWClangCross.cmake" \
    -DDISPLAY_VERSION=$1 \
    -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON \
    -DENABLE_QT_TRANSLATION=ON \
    -DUSE_CCACHE=ON \
    -DYUZU_USE_BUNDLED_SDL2=OFF \
    -DYUZU_USE_EXTERNAL_SDL2=OFF \
    -GNinja
ninja yuzu yuzu-cmd

ccache -s
CMakeModules/MinGWClangCross.cmake (new file, 55 lines)
@@ -0,0 +1,55 @@
set(MINGW_PREFIX /usr/x86_64-w64-mingw32/)
set(CMAKE_SYSTEM_NAME Windows)
set(CMAKE_SYSTEM_PROCESSOR x86_64)

set(CMAKE_FIND_ROOT_PATH ${MINGW_PREFIX})
set(SDL2_PATH ${MINGW_PREFIX})
set(MINGW_TOOL_PREFIX ${CMAKE_SYSTEM_PROCESSOR}-w64-mingw32-)

# Specify the cross compiler
set(CMAKE_C_COMPILER ${MINGW_TOOL_PREFIX}clang)
set(CMAKE_CXX_COMPILER ${MINGW_TOOL_PREFIX}clang++)
set(CMAKE_RC_COMPILER ${MINGW_TOOL_PREFIX}windres)
set(CMAKE_C_COMPILER_AR ${MINGW_TOOL_PREFIX}ar)
set(CMAKE_CXX_COMPILER_AR ${MINGW_TOOL_PREFIX}ar)
set(CMAKE_C_COMPILER_RANLIB ${MINGW_TOOL_PREFIX}ranlib)
set(CMAKE_CXX_COMPILER_RANLIB ${MINGW_TOOL_PREFIX}ranlib)

# Mingw tools
set(STRIP ${MINGW_TOOL_PREFIX}strip)
set(WINDRES ${MINGW_TOOL_PREFIX}windres)
set(ENV{PKG_CONFIG} ${MINGW_TOOL_PREFIX}pkg-config)

# ccache wrapper
option(USE_CCACHE "Use ccache for compilation" OFF)
if(USE_CCACHE)
    find_program(CCACHE ccache)
    if(CCACHE)
        message(STATUS "Using ccache found in PATH")
        set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE})
        set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE})
    else(CCACHE)
        message(WARNING "USE_CCACHE enabled, but no ccache found")
    endif(CCACHE)
endif(USE_CCACHE)

# Search for programs in the build host directories
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)

# Echo modified cmake vars to screen for debugging purposes
if(NOT DEFINED ENV{MINGW_DEBUG_INFO})
    message("")
    message("Custom cmake vars: (blank = system default)")
    message("-----------------------------------------")
    message("* CMAKE_C_COMPILER : ${CMAKE_C_COMPILER}")
    message("* CMAKE_CXX_COMPILER : ${CMAKE_CXX_COMPILER}")
    message("* CMAKE_RC_COMPILER : ${CMAKE_RC_COMPILER}")
    message("* WINDRES : ${WINDRES}")
    message("* ENV{PKG_CONFIG} : $ENV{PKG_CONFIG}")
    message("* STRIP : ${STRIP}")
    message("* USE_CCACHE : ${USE_CCACHE}")
    message("")
    # So that the debug info only appears once
    set(ENV{MINGW_DEBUG_INFO} SHOWN)
endif()
externals/dynarmic (vendored submodule, 2 lines)
Submodule externals/dynarmic updated: 7f84870712...5ad1d02351
@@ -98,13 +98,13 @@ AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing_, Core::Memor
AudioRenderer::~AudioRenderer() = default;

ResultCode AudioRenderer::Start() {
Result AudioRenderer::Start() {
    audio_out->StartStream(stream);
    ReleaseAndQueueBuffers();
    return ResultSuccess;
}

ResultCode AudioRenderer::Stop() {
Result AudioRenderer::Stop() {
    audio_out->StopStream(stream);
    return ResultSuccess;
}

@@ -125,8 +125,8 @@ Stream::State AudioRenderer::GetStreamState() const {
    return stream->GetState();
}

ResultCode AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
                                              std::vector<u8>& output_params) {
Result AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
                                          std::vector<u8>& output_params) {
    std::scoped_lock lock{mutex};
    InfoUpdater info_updater{input_params, output_params, behavior_info};

@@ -43,10 +43,10 @@ public:
                  Stream::ReleaseCallback&& release_callback, std::size_t instance_number);
    ~AudioRenderer();

    [[nodiscard]] ResultCode UpdateAudioRenderer(const std::vector<u8>& input_params,
                                                 std::vector<u8>& output_params);
    [[nodiscard]] ResultCode Start();
    [[nodiscard]] ResultCode Stop();
    [[nodiscard]] Result UpdateAudioRenderer(const std::vector<u8>& input_params,
                                             std::vector<u8>& output_params);
    [[nodiscard]] Result Start();
    [[nodiscard]] Result Stop();
    void QueueMixedBuffer(Buffer::Tag tag);
    void ReleaseAndQueueBuffers();
    [[nodiscard]] u32 GetSampleRate() const;

@@ -10,8 +10,8 @@
namespace AudioCommon {
namespace Audren {
constexpr ResultCode ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
constexpr ResultCode ERR_SPLITTER_SORT_FAILED{ErrorModule::Audio, 43};
constexpr Result ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
constexpr Result ERR_SPLITTER_SORT_FAILED{ErrorModule::Audio, 43};
} // namespace Audren

constexpr u8 BASE_REVISION = '0';

@@ -285,9 +285,8 @@ bool InfoUpdater::UpdateSplitterInfo(SplitterContext& splitter_context) {
    return true;
}

ResultCode InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
                                    SplitterContext& splitter_context,
                                    EffectContext& effect_context) {
Result InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
                                SplitterContext& splitter_context, EffectContext& effect_context) {
    std::vector<MixInfo::InParams> mix_in_params;

    if (!behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {

@@ -32,8 +32,8 @@ public:
                 VAddr audio_codec_dsp_addr);
    bool UpdateEffects(EffectContext& effect_context, bool is_active);
    bool UpdateSplitterInfo(SplitterContext& splitter_context);
    ResultCode UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
                           SplitterContext& splitter_context, EffectContext& effect_context);
    Result UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
                       SplitterContext& splitter_context, EffectContext& effect_context);
    bool UpdateSinks(SinkContext& sink_context);
    bool UpdatePerformanceBuffer();
    bool UpdateErrorInfo(BehaviorInfo& in_behavior_info);
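Most of this comparison is a mechanical rename of `ResultCode` to `Result`; call sites keep constructing values from an `ErrorModule` and a description number, as in the audio hunks above. A rough, hypothetical sketch of what such a module-plus-description result value looks like (the bit layout and enum values here are assumptions for illustration, not yuzu's actual headers):

```cpp
#include <cstdint>
#include <iostream>

// Illustrative only: a Result-style value packing a module id and a description
// into one 32-bit word. Field widths are assumptions, not the emulator's layout.
enum class ErrorModule : std::uint32_t { FS = 2, Audio = 153 };

struct Result {
    std::uint32_t raw{};
    constexpr Result() = default;
    constexpr Result(ErrorModule module, std::uint32_t description)
        : raw{(static_cast<std::uint32_t>(module) & 0x1FF) | (description << 9)} {}
    constexpr bool IsSuccess() const { return raw == 0; }
};

constexpr Result ResultSuccess{};
constexpr Result ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};

int main() {
    std::cout << ERR_INVALID_PARAMETERS.raw << '\n'; // packed error value
    std::cout << ResultSuccess.IsSuccess() << '\n';  // 1
}
```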
@@ -47,6 +47,9 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
    case ThreadPriority::VeryHigh:
        windows_priority = THREAD_PRIORITY_HIGHEST;
        break;
    case ThreadPriority::Critical:
        windows_priority = THREAD_PRIORITY_TIME_CRITICAL;
        break;
    default:
        windows_priority = THREAD_PRIORITY_NORMAL;
        break;

@@ -59,9 +62,10 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
void SetCurrentThreadPriority(ThreadPriority new_priority) {
    pthread_t this_thread = pthread_self();

    s32 max_prio = sched_get_priority_max(SCHED_OTHER);
    s32 min_prio = sched_get_priority_min(SCHED_OTHER);
    u32 level = static_cast<u32>(new_priority) + 1;
    const auto scheduling_type = SCHED_OTHER;
    s32 max_prio = sched_get_priority_max(scheduling_type);
    s32 min_prio = sched_get_priority_min(scheduling_type);
    u32 level = std::max(static_cast<u32>(new_priority) + 1, 4U);

    struct sched_param params;
    if (max_prio > min_prio) {

@@ -70,7 +74,7 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
        params.sched_priority = min_prio - ((min_prio - max_prio) * level) / 4;
    }

    pthread_setschedparam(this_thread, SCHED_OTHER, &params);
    pthread_setschedparam(this_thread, scheduling_type, &params);
}

#endif
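The POSIX path above interpolates the chosen `ThreadPriority` across the scheduler's reported priority range. A standalone sketch of that interpolation, using an assumed example range since Linux reports 0..0 for SCHED_OTHER:

```cpp
#include <cstdint>
#include <iostream>

// Standalone sketch of the sched_priority interpolation above. min_prio/max_prio
// stand in for sched_get_priority_min/max(policy); level is the ThreadPriority
// enum value + 1. Only meaningful when the policy exposes a real priority range.
int MapPriority(std::uint32_t level, int min_prio, int max_prio) {
    return min_prio - ((min_prio - max_prio) * static_cast<int>(level)) / 4;
}

int main() {
    // Example range only: pretend the policy reports priorities 1 (lowest) .. 99 (highest).
    for (std::uint32_t level = 1; level <= 4; ++level) {
        std::cout << "level " << level << " -> sched_priority " << MapPriority(level, 1, 99) << '\n';
    }
}
```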
@@ -92,6 +92,7 @@ enum class ThreadPriority : u32 {
    Normal = 1,
    High = 2,
    VeryHigh = 3,
    Critical = 4,
};

void SetCurrentThreadPriority(ThreadPriority new_priority);
@@ -30,6 +30,10 @@ namespace Common {
#else
    return _udiv128(r[1], r[0], d, &remainder);
#endif
#else
#ifdef __SIZEOF_INT128__
    const auto product = static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b);
    return static_cast<u64>(product / d);
#else
    const u64 diva = a / d;
    const u64 moda = a % d;

@@ -37,6 +41,7 @@ namespace Common {
    const u64 modb = b % d;
    return diva * b + moda * divb + moda * modb / d;
#endif
#endif
}

// This function multiplies 2 u64 values and produces a u128 value;
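The new `__SIZEOF_INT128__` branch computes (a * b) / d without truncating the 128-bit product. A self-contained sketch of both fallback strategies shown above:

```cpp
#include <cstdint>
#include <iostream>

using u64 = std::uint64_t;

// Pure 64-bit decomposition from the existing fallback. Truncation makes this an
// approximation of the exact quotient, but no single multiply overflows 64 bits.
u64 MultiplyAndDivide64_Split(u64 a, u64 b, u64 d) {
    const u64 diva = a / d;
    const u64 moda = a % d;
    const u64 divb = b / d;
    const u64 modb = b % d;
    return diva * b + moda * divb + moda * modb / d;
}

// Exact version using the compiler-provided 128-bit integer where available
// (GCC/Clang); otherwise fall back to the split approximation.
u64 MultiplyAndDivide64_Int128(u64 a, u64 b, u64 d) {
#ifdef __SIZEOF_INT128__
    const auto product = static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b);
    return static_cast<u64>(product / d);
#else
    return MultiplyAndDivide64_Split(a, b, d);
#endif
}

int main() {
    const u64 a = 0x123456789ABCDEFULL, b = 1000000007ULL, d = 999983ULL;
    std::cout << MultiplyAndDivide64_Int128(a, b, d) << '\n';
    std::cout << MultiplyAndDivide64_Split(a, b, d) << '\n';
}
```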
@@ -75,8 +75,8 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
}

u64 NativeClock::GetRTSC() {
    TimePoint new_time_point{};
    TimePoint current_time_point{};
    TimePoint new_time_point{};

    current_time_point.pack = Common::AtomicLoad128(time_point.pack.data());
    do {

@@ -89,8 +89,7 @@ u64 NativeClock::GetRTSC() {
        new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff;
    } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
                                           current_time_point.pack, current_time_point.pack));
    /// The clock cannot be more precise than the guest timer, remove the lower bits
    return new_time_point.inner.accumulated_ticks & inaccuracy_mask;
    return new_time_point.inner.accumulated_ticks;
}

void NativeClock::Pause(bool is_paused) {

@@ -37,12 +37,8 @@ private:
    } inner;
};

/// value used to reduce the native clocks accuracy as some apss rely on
/// undefined behavior where the level of accuracy in the clock shouldn't
/// be higher.
static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1);

TimePoint time_point;

// factors
u64 clock_rtsc_factor{};
u64 cpu_rtsc_factor{};
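For reference, the removed `inaccuracy_mask` rounded the accumulated tick count down to a 0x400-tick granularity so the host clock never looked more precise than the guest timer; the new code returns the raw count. A tiny sketch of what that masking did (example value only):

```cpp
#include <cstdint>
#include <iostream>

int main() {
    // Mask copied from the removed declaration: clears the low 10 bits.
    constexpr std::uint64_t inaccuracy_mask = ~(UINT64_C(0x400) - 1);
    const std::uint64_t ticks = 0x12345; // arbitrary example tick count
    std::cout << std::hex << (ticks & inaccuracy_mask) << '\n'; // prints 12000
}
```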
@@ -222,7 +222,7 @@ add_library(core STATIC
    hle/kernel/k_page_buffer.h
    hle/kernel/k_page_heap.cpp
    hle/kernel/k_page_heap.h
    hle/kernel/k_page_linked_list.h
    hle/kernel/k_page_group.h
    hle/kernel/k_page_table.cpp
    hle/kernel/k_page_table.h
    hle/kernel/k_port.cpp
@@ -95,7 +95,7 @@ void ARM_Interface::Run() {
    using Kernel::SuspendType;

    while (true) {
        Kernel::KThread* current_thread{system.Kernel().CurrentScheduler()->GetCurrentThread()};
        Kernel::KThread* current_thread{Kernel::GetCurrentThreadPointer(system.Kernel())};
        Dynarmic::HaltReason hr{};

        // Notify the debugger and go to sleep if a step was performed

@@ -119,23 +119,16 @@ void ARM_Interface::Run() {
        }
        system.ExitDynarmicProfile();

        // Notify the debugger and go to sleep if a breakpoint was hit,
        // or if the thread is unable to continue for any reason.
        if (Has(hr, breakpoint) || Has(hr, no_execute)) {
        // Notify the debugger and go to sleep if a breakpoint was hit.
        if (Has(hr, breakpoint)) {
            RewindBreakpointInstruction();
            if (system.DebuggerEnabled()) {
                system.GetDebugger().NotifyThreadStopped(current_thread);
            }
            current_thread->RequestSuspend(Kernel::SuspendType::Debug);
            system.GetDebugger().NotifyThreadStopped(current_thread);
            current_thread->RequestSuspend(SuspendType::Debug);
            break;
        }

        // Notify the debugger and go to sleep if a watchpoint was hit.
        if (Has(hr, watchpoint)) {
            RewindBreakpointInstruction();
            if (system.DebuggerEnabled()) {
                system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint());
            }
            system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint());
            current_thread->RequestSuspend(SuspendType::Debug);
            break;
        }

@@ -204,7 +204,6 @@ public:
    static constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
    static constexpr Dynarmic::HaltReason breakpoint = Dynarmic::HaltReason::UserDefined4;
    static constexpr Dynarmic::HaltReason watchpoint = Dynarmic::HaltReason::UserDefined5;
    static constexpr Dynarmic::HaltReason no_execute = Dynarmic::HaltReason::UserDefined6;

protected:
    /// System context that this ARM interface is running under.
@@ -48,12 +48,6 @@ public:
        CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
        return memory.Read64(vaddr);
    }
    std::optional<u32> MemoryReadCode(u32 vaddr) override {
        if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
            return std::nullopt;
        }
        return MemoryRead32(vaddr);
    }

    void MemoryWrite8(u32 vaddr, u8 value) override {
        if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {

@@ -95,28 +89,21 @@ public:
    void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
        parent.LogBacktrace();
        LOG_ERROR(Core_ARM,
                  "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
                  num_instructions, MemoryRead32(pc));
        UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc,
                          MemoryReadCode(pc));
    }

    void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
        switch (exception) {
        case Dynarmic::A32::Exception::NoExecuteFault:
            LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#08x}", pc);
            ReturnException(pc, ARM_Interface::no_execute);
            if (debugger_enabled) {
                parent.SaveContext(parent.breakpoint_context);
                parent.jit.load()->HaltExecution(ARM_Interface::breakpoint);
                return;
        default:
            if (debugger_enabled) {
                ReturnException(pc, ARM_Interface::breakpoint);
                return;
            }

            parent.LogBacktrace();
            LOG_CRITICAL(Core_ARM,
                         "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
                         exception, pc, MemoryRead32(pc), parent.IsInThumbMode());
        }

        parent.LogBacktrace();
        LOG_CRITICAL(Core_ARM,
                     "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
                     exception, pc, MemoryReadCode(pc), parent.IsInThumbMode());
    }

    void CallSVC(u32 swi) override {

@@ -154,20 +141,15 @@ public:
        const auto match{parent.MatchingWatchpoint(addr, size, type)};
        if (match) {
            parent.SaveContext(parent.breakpoint_context);
            parent.jit.load()->HaltExecution(ARM_Interface::watchpoint);
            parent.halted_watchpoint = match;
            ReturnException(parent.jit.load()->Regs()[15], ARM_Interface::watchpoint);
            return false;
        }

        return true;
    }

    void ReturnException(u32 pc, Dynarmic::HaltReason hr) {
        parent.SaveContext(parent.breakpoint_context);
        parent.breakpoint_context.cpu_registers[15] = pc;
        parent.jit.load()->HaltExecution(hr);
    }

    ARM_Dynarmic_32& parent;
    Core::Memory::Memory& memory;
    std::size_t num_interpreted_instructions{};
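Both JIT callback classes now funnel debug events through a small `ReturnException()` helper: save the context, point its saved PC at the faulting instruction, and halt the JIT with a reason flag. A minimal mock of that pattern; the `Jit` and `Context` types here are stand-ins, not dynarmic's interfaces:

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

// Mock halt reasons and state, illustrating the control flow only.
enum class HaltReason : std::uint32_t { Breakpoint = 1, Watchpoint = 2 };

struct Context { std::uint64_t pc{}; };

struct Jit {
    std::atomic<std::uint32_t> halt_flags{0};
    void HaltExecution(HaltReason hr) { halt_flags |= static_cast<std::uint32_t>(hr); }
};

struct Callbacks {
    Jit& jit;
    Context& breakpoint_context;

    void ReturnException(std::uint64_t pc, HaltReason hr) {
        breakpoint_context.pc = pc; // the debugger later inspects the context at this PC
        jit.HaltExecution(hr);      // the JIT stops at the next safe point
    }
};

int main() {
    Jit jit;
    Context ctx;
    Callbacks cb{jit, ctx};
    cb.ReturnException(0x8000'1234, HaltReason::Breakpoint);
    std::cout << std::hex << ctx.pc << ' ' << jit.halt_flags.load() << '\n';
}
```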
@@ -52,12 +52,6 @@ public:
        CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read);
        return {memory.Read64(vaddr), memory.Read64(vaddr + 8)};
    }
    std::optional<u32> MemoryReadCode(u64 vaddr) override {
        if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
            return std::nullopt;
        }
        return MemoryRead32(vaddr);
    }

    void MemoryWrite8(u64 vaddr, u8 value) override {
        if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {

@@ -111,7 +105,7 @@ public:
        parent.LogBacktrace();
        LOG_ERROR(Core_ARM,
                  "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
                  num_instructions, MemoryRead32(pc));
                  num_instructions, MemoryReadCode(pc));
    }

    void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,

@@ -144,19 +138,16 @@ public:
        case Dynarmic::A64::Exception::SendEventLocal:
        case Dynarmic::A64::Exception::Yield:
            return;
        case Dynarmic::A64::Exception::NoExecuteFault:
            LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#016x}", pc);
            ReturnException(pc, ARM_Interface::no_execute);
            return;
        default:
            if (debugger_enabled) {
                ReturnException(pc, ARM_Interface::breakpoint);
                parent.SaveContext(parent.breakpoint_context);
                parent.jit.load()->HaltExecution(ARM_Interface::breakpoint);
                return;
            }

            parent.LogBacktrace();
            LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
                         static_cast<std::size_t>(exception), pc, MemoryRead32(pc));
            ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
                       static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
        }
    }

@@ -197,20 +188,15 @@ public:
        const auto match{parent.MatchingWatchpoint(addr, size, type)};
        if (match) {
            parent.SaveContext(parent.breakpoint_context);
            parent.jit.load()->HaltExecution(ARM_Interface::watchpoint);
            parent.halted_watchpoint = match;
            ReturnException(parent.jit.load()->GetPC(), ARM_Interface::watchpoint);
            return false;
        }

        return true;
    }

    void ReturnException(u64 pc, Dynarmic::HaltReason hr) {
        parent.SaveContext(parent.breakpoint_context);
        parent.breakpoint_context.pc = pc;
        parent.jit.load()->HaltExecution(hr);
    }

    ARM_Dynarmic_64& parent;
    Core::Memory::Memory& memory;
    u64 tpidrro_el0 = 0;
@@ -6,7 +6,9 @@
#include <string>
#include <tuple>

#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hardware_properties.h"

@@ -41,11 +43,11 @@ CoreTiming::CoreTiming()
CoreTiming::~CoreTiming() = default;

void CoreTiming::ThreadEntry(CoreTiming& instance) {
    constexpr char name[] = "yuzu:HostTiming";
    MicroProfileOnThreadCreate(name);
    Common::SetCurrentThreadName(name);
    Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
void CoreTiming::ThreadEntry(CoreTiming& instance, size_t id) {
    const std::string name = "yuzu:HostTiming_" + std::to_string(id);
    MicroProfileOnThreadCreate(name.c_str());
    Common::SetCurrentThreadName(name.c_str());
    Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
    instance.on_thread_init();
    instance.ThreadLoop();
    MicroProfileOnThreadExit();
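`ThreadEntry` now takes a worker index and derives the host-timing thread's name from it; `Initialize()` in the next hunk spawns a second worker only when the machine reports more than 8 hardware threads. A condensed, hypothetical sketch of that spawning pattern, printing instead of calling yuzu's `SetCurrentThreadName`:

```cpp
#include <iostream>
#include <string>
#include <thread>
#include <vector>

// Each timing worker gets an index-derived name and runs the same loop body.
static void ThreadEntry(std::size_t id) {
    const std::string name = "yuzu:HostTiming_" + std::to_string(id);
    std::cout << name << " started\n";
}

int main() {
    std::vector<std::thread> worker_threads;
    const std::size_t hardware_concurrency = std::thread::hardware_concurrency();
    std::size_t id = 0;
    worker_threads.emplace_back(ThreadEntry, id++);
    if (hardware_concurrency > 8) { // second timing worker only on wide machines
        worker_threads.emplace_back(ThreadEntry, id++);
    }
    for (auto& thread : worker_threads) {
        thread.join();
    }
}
```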
@@ -59,68 +61,97 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
    const auto empty_timed_callback = [](std::uintptr_t, std::chrono::nanoseconds) {};
    ev_lost = CreateEvent("_lost_event", empty_timed_callback);
    if (is_multicore) {
        timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
        const auto hardware_concurrency = std::thread::hardware_concurrency();
        size_t id = 0;
        worker_threads.emplace_back(ThreadEntry, std::ref(*this), id++);
        if (hardware_concurrency > 8) {
            worker_threads.emplace_back(ThreadEntry, std::ref(*this), id++);
        }
    }
}

void CoreTiming::Shutdown() {
    paused = true;
    is_paused = true;
    shutting_down = true;
    pause_event.Set();
    event.Set();
    if (timer_thread) {
        timer_thread->join();
    std::atomic_thread_fence(std::memory_order_release);

    event_cv.notify_all();
    wait_pause_cv.notify_all();
    for (auto& thread : worker_threads) {
        thread.join();
    }
    worker_threads.clear();
    ClearPendingEvents();
    timer_thread.reset();
    has_started = false;
}

void CoreTiming::Pause(bool is_paused) {
    paused = is_paused;
    pause_event.Set();
}

void CoreTiming::SyncPause(bool is_paused) {
    if (is_paused == paused && paused_set == paused) {
void CoreTiming::Pause(bool is_paused_) {
    std::unique_lock main_lock(event_mutex);
    if (is_paused_ == paused_state.load(std::memory_order_relaxed)) {
        return;
    }
    Pause(is_paused);
    if (timer_thread) {
        if (!is_paused) {
            pause_event.Set();
    if (is_multicore) {
        is_paused = is_paused_;
        event_cv.notify_all();
        if (!is_paused_) {
            wait_pause_cv.notify_all();
        }
    }
    paused_state.store(is_paused_, std::memory_order_relaxed);
}

void CoreTiming::SyncPause(bool is_paused_) {
    std::unique_lock main_lock(event_mutex);
    if (is_paused_ == paused_state.load(std::memory_order_relaxed)) {
        return;
    }

    if (is_multicore) {
        is_paused = is_paused_;
        event_cv.notify_all();
        if (!is_paused_) {
            wait_pause_cv.notify_all();
        }
    }
    paused_state.store(is_paused_, std::memory_order_relaxed);
    if (is_multicore) {
        if (is_paused_) {
            wait_signal_cv.wait(main_lock, [this] { return pause_count == worker_threads.size(); });
        } else {
            wait_signal_cv.wait(main_lock, [this] { return pause_count == 0; });
        }
        event.Set();
        while (paused_set != is_paused)
            ;
    }
}

bool CoreTiming::IsRunning() const {
    return !paused_set;
    return !paused_state.load(std::memory_order_acquire);
}

bool CoreTiming::HasPendingEvents() const {
    return !(wait_set && event_queue.empty());
    std::unique_lock main_lock(event_mutex);
    return !event_queue.empty() || pending_events.load(std::memory_order_relaxed) != 0;
}

void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
                               const std::shared_ptr<EventType>& event_type,
                               std::uintptr_t user_data) {
    {
        std::scoped_lock scope{basic_lock};
        const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());

        event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
    std::unique_lock main_lock(event_mutex);
    const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());

        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
    pending_events.fetch_add(1, std::memory_order_relaxed);

    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());

    if (is_multicore) {
        event_cv.notify_one();
    }
    event.Set();
}

void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
                                 std::uintptr_t user_data) {
    std::scoped_lock scope{basic_lock};
    std::unique_lock main_lock(event_mutex);
    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
        return e.type.lock().get() == event_type.get() && e.user_data == user_data;
    });
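`ScheduleEvent()` now keeps `event_queue` ordered as a min-heap on the event timestamp via `std::push_heap`/`std::pop_heap` with `std::greater<>`, so the soonest event is always at the front. A simplified sketch of that queue discipline; the `Event` struct here is a cut-down stand-in for the real one:

```cpp
#include <algorithm>
#include <compare>
#include <cstdint>
#include <iostream>
#include <vector>

struct Event {
    std::uint64_t time;     // absolute deadline in ns
    std::uint64_t fifo_id;  // tie-breaker so equal deadlines stay in FIFO order
    auto operator<=>(const Event&) const = default;
};

int main() {
    std::vector<Event> event_queue;
    std::uint64_t event_fifo_id = 0;

    auto schedule = [&](std::uint64_t timeout) {
        event_queue.push_back(Event{timeout, event_fifo_id++});
        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    };

    schedule(300);
    schedule(100);
    schedule(200);

    while (!event_queue.empty()) {
        // front() is always the soonest event thanks to the min-heap ordering.
        const Event evt = event_queue.front();
        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        event_queue.pop_back();
        std::cout << "fire event at t=" << evt.time << '\n'; // 100, 200, 300
    }
}
```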
@@ -129,6 +160,7 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
    if (itr != event_queue.end()) {
        event_queue.erase(itr, event_queue.end());
        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        pending_events.fetch_sub(1, std::memory_order_relaxed);
    }
}

@@ -168,11 +200,12 @@ u64 CoreTiming::GetClockTicks() const {
}

void CoreTiming::ClearPendingEvents() {
    std::unique_lock main_lock(event_mutex);
    event_queue.clear();
}

void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
    std::scoped_lock lock{basic_lock};
    std::unique_lock main_lock(event_mutex);

    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
        return e.type.lock().get() == event_type.get();

@@ -186,21 +219,28 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
}

std::optional<s64> CoreTiming::Advance() {
    std::scoped_lock lock{advance_lock, basic_lock};
    global_timer = GetGlobalTimeNs().count();

    std::unique_lock main_lock(event_mutex);
    while (!event_queue.empty() && event_queue.front().time <= global_timer) {
        Event evt = std::move(event_queue.front());
        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        event_queue.pop_back();
        basic_lock.unlock();

        if (const auto event_type{evt.type.lock()}) {
            event_type->callback(
                evt.user_data, std::chrono::nanoseconds{static_cast<s64>(global_timer - evt.time)});
            sequence_mutex.lock();
            event_mutex.unlock();

            event_type->guard.lock();
            sequence_mutex.unlock();
            const s64 delay = static_cast<s64>(GetGlobalTimeNs().count() - evt.time);
            event_type->callback(evt.user_data, std::chrono::nanoseconds{delay});
            event_type->guard.unlock();

            event_mutex.lock();
            pending_events.fetch_sub(1, std::memory_order_relaxed);
        }

        basic_lock.lock();
        global_timer = GetGlobalTimeNs().count();
    }
@@ -213,26 +253,34 @@ std::optional<s64> CoreTiming::Advance() {
}

void CoreTiming::ThreadLoop() {
    const auto predicate = [this] { return !event_queue.empty() || is_paused; };
    has_started = true;
    while (!shutting_down) {
        while (!paused) {
            paused_set = false;
        while (!is_paused && !shutting_down) {
            const auto next_time = Advance();
            if (next_time) {
                if (*next_time > 0) {
                    std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
                    event.WaitFor(next_time_ns);
                    std::unique_lock main_lock(event_mutex);
                    event_cv.wait_for(main_lock, next_time_ns, predicate);
                }
            } else {
                wait_set = true;
                event.Wait();
                std::unique_lock main_lock(event_mutex);
                event_cv.wait(main_lock, predicate);
            }
            wait_set = false;
        }
        paused_set = true;
        clock->Pause(true);
        pause_event.Wait();
        clock->Pause(false);
        std::unique_lock main_lock(event_mutex);
        pause_count++;
        if (pause_count == worker_threads.size()) {
            clock->Pause(true);
            wait_signal_cv.notify_all();
        }
        wait_pause_cv.wait(main_lock, [this] { return !is_paused || shutting_down; });
        pause_count--;
        if (pause_count == 0) {
            clock->Pause(false);
            wait_signal_cv.notify_all();
        }
    }
}
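`ThreadLoop()` replaces the old `Common::Event` waits with a `std::condition_variable` plus a predicate, so a worker wakes when new events arrive, a pause is requested, or shutdown begins, and it tolerates spurious wakeups. A condensed sketch of the bounded predicate wait, with the surrounding machinery omitted:

```cpp
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

int main() {
    std::mutex event_mutex;
    std::condition_variable event_cv;
    bool is_paused = false;
    bool has_work = false;

    std::thread worker([&] {
        std::unique_lock lk(event_mutex);
        // Re-checked on every wakeup, so spurious wakeups are harmless.
        const auto predicate = [&] { return has_work || is_paused; };
        // Bounded wait, as used when Advance() reports a next deadline.
        event_cv.wait_for(lk, std::chrono::milliseconds(50), predicate);
        std::cout << "woke up: has_work=" << has_work << " is_paused=" << is_paused << '\n';
    });

    {
        std::scoped_lock lk(event_mutex);
        has_work = true;
    }
    event_cv.notify_one();
    worker.join();
}
```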
@@ -5,6 +5,7 @@
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>

@@ -14,7 +15,6 @@
#include <vector>

#include "common/common_types.h"
#include "common/thread.h"
#include "common/wall_clock.h"

namespace Core::Timing {

@@ -32,6 +32,7 @@ struct EventType {
    TimedCallback callback;
    /// A pointer to the name of the event.
    const std::string name;
    mutable std::mutex guard;
};

/**

@@ -131,7 +132,7 @@ private:
    /// Clear all pending events. This should ONLY be done on exit.
    void ClearPendingEvents();

    static void ThreadEntry(CoreTiming& instance);
    static void ThreadEntry(CoreTiming& instance, size_t id);
    void ThreadLoop();

    std::unique_ptr<Common::WallClock> clock;

@@ -144,21 +145,25 @@ private:
    // accomodated by the standard adaptor class.
    std::vector<Event> event_queue;
    u64 event_fifo_id = 0;
    std::atomic<size_t> pending_events{};

    std::shared_ptr<EventType> ev_lost;
    Common::Event event{};
    Common::Event pause_event{};
    std::mutex basic_lock;
    std::mutex advance_lock;
    std::unique_ptr<std::thread> timer_thread;
    std::atomic<bool> paused{};
    std::atomic<bool> paused_set{};
    std::atomic<bool> wait_set{};
    std::atomic<bool> shutting_down{};
    std::atomic<bool> has_started{};
    std::function<void()> on_thread_init{};

    std::vector<std::thread> worker_threads;

    std::condition_variable event_cv;
    std::condition_variable wait_pause_cv;
    std::condition_variable wait_signal_cv;
    mutable std::mutex event_mutex;
    mutable std::mutex sequence_mutex;

    std::atomic<bool> paused_state{};
    bool is_paused{};
    bool shutting_down{};
    bool is_multicore{};
    size_t pause_count{};

    /// Cycle timing
    u64 ticks{};
@@ -95,7 +95,7 @@ void* CpuManager::GetStartFuncParameter() {
void CpuManager::MultiCoreRunGuestThread() {
    auto& kernel = system.Kernel();
    kernel.CurrentScheduler()->OnThreadStart();
    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
    auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
    auto& host_context = thread->GetHostContext();
    host_context->SetRewindPoint(GuestRewindFunction, this);
    MultiCoreRunGuestLoop();

@@ -132,7 +132,7 @@ void CpuManager::MultiCoreRunIdleThread() {
void CpuManager::SingleCoreRunGuestThread() {
    auto& kernel = system.Kernel();
    kernel.CurrentScheduler()->OnThreadStart();
    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
    auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
    auto& host_context = thread->GetHostContext();
    host_context->SetRewindPoint(GuestRewindFunction, this);
    SingleCoreRunGuestLoop();

@@ -172,7 +172,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
    {
        auto& kernel = system.Kernel();
        auto& scheduler = kernel.Scheduler(current_core);
        Kernel::KThread* current_thread = scheduler.GetCurrentThread();
        Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread();
        if (idle_count >= 4 || from_running_enviroment) {
            if (!from_running_enviroment) {
                system.CoreTiming().Idle();

@@ -184,7 +184,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
        }
        current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
        system.CoreTiming().ResetTicks();
        scheduler.Unload(scheduler.GetCurrentThread());
        scheduler.Unload(scheduler.GetSchedulerCurrentThread());

        auto& next_scheduler = kernel.Scheduler(current_core);
        Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext());

@@ -193,7 +193,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
    // May have changed scheduler
    {
        auto& scheduler = system.Kernel().Scheduler(current_core);
        scheduler.Reload(scheduler.GetCurrentThread());
        scheduler.Reload(scheduler.GetSchedulerCurrentThread());
        if (!scheduler.IsIdle()) {
            idle_count = 0;
        }

@@ -237,7 +237,8 @@ void CpuManager::RunThread(std::size_t core) {
        system.GPU().ObtainContext();
    }

    auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
    auto* current_thread = system.Kernel().CurrentScheduler()->GetIdleThread();
    Kernel::SetCurrentThread(system.Kernel(), current_thread);
    Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
}
@@ -252,6 +252,7 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
        const auto sep{std::find(command.begin(), command.end(), '=') - command.begin() + 1};
        const size_t reg{static_cast<size_t>(strtoll(command.data(), nullptr, 16))};
        arch->RegWrite(backend.GetActiveThread(), reg, std::string_view(command).substr(sep));
        SendReply(GDB_STUB_REPLY_OK);
        break;
    }
    case 'm': {
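The new 'P' (write register) packet handler parses the register index as hex up to '=' and hands the remainder to the architecture backend. A standalone sketch of that parsing, with a stub `RegWrite` standing in for yuzu's GDB backend:

```cpp
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <string>
#include <string_view>

// Stub for illustration; the real call goes to the architecture backend.
static void RegWrite(std::size_t reg, std::string_view value) {
    std::cout << "write reg " << reg << " = " << value << '\n';
}

int main() {
    // Payload after the leading 'P': "<reg in hex>=<value in hex>".
    const std::string command = "1f=00000000deadbeef";
    const auto sep = std::find(command.begin(), command.end(), '=') - command.begin() + 1;
    const std::size_t reg = static_cast<std::size_t>(std::strtoll(command.data(), nullptr, 16));
    RegWrite(reg, std::string_view(command).substr(sep)); // write reg 31 = 00000000deadbeef
}
```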
@@ -8,14 +8,14 @@
namespace FileSys {

constexpr ResultCode ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1};
constexpr ResultCode ERROR_PATH_ALREADY_EXISTS{ErrorModule::FS, 2};
constexpr ResultCode ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002};
constexpr ResultCode ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001};
constexpr ResultCode ERROR_OUT_OF_BOUNDS{ErrorModule::FS, 3005};
constexpr ResultCode ERROR_FAILED_MOUNT_ARCHIVE{ErrorModule::FS, 3223};
constexpr ResultCode ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
constexpr ResultCode ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
constexpr ResultCode ERROR_INVALID_SIZE{ErrorModule::FS, 6062};
constexpr Result ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1};
constexpr Result ERROR_PATH_ALREADY_EXISTS{ErrorModule::FS, 2};
constexpr Result ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002};
constexpr Result ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001};
constexpr Result ERROR_OUT_OF_BOUNDS{ErrorModule::FS, 3005};
constexpr Result ERROR_FAILED_MOUNT_ARCHIVE{ErrorModule::FS, 3223};
constexpr Result ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
constexpr Result ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
constexpr Result ERROR_INVALID_SIZE{ErrorModule::FS, 6062};

} // namespace FileSys
@@ -8,12 +8,12 @@ namespace Core::Frontend {
ErrorApplet::~ErrorApplet() = default;

void DefaultErrorApplet::ShowError(ResultCode error, std::function<void()> finished) const {
void DefaultErrorApplet::ShowError(Result error, std::function<void()> finished) const {
    LOG_CRITICAL(Service_Fatal, "Application requested error display: {:04}-{:04} (raw={:08X})",
                 error.module.Value(), error.description.Value(), error.raw);
}

void DefaultErrorApplet::ShowErrorWithTimestamp(ResultCode error, std::chrono::seconds time,
void DefaultErrorApplet::ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
                                                std::function<void()> finished) const {
    LOG_CRITICAL(
        Service_Fatal,

@@ -21,7 +21,7 @@ void DefaultErrorApplet::ShowErrorWithTimestamp(ResultCode error, std::chrono::s
        error.module.Value(), error.description.Value(), error.raw, time.count());
}

void DefaultErrorApplet::ShowCustomErrorText(ResultCode error, std::string main_text,
void DefaultErrorApplet::ShowCustomErrorText(Result error, std::string main_text,
                                             std::string detail_text,
                                             std::function<void()> finished) const {
    LOG_CRITICAL(Service_Fatal,

@@ -14,22 +14,22 @@ class ErrorApplet {
public:
    virtual ~ErrorApplet();

    virtual void ShowError(ResultCode error, std::function<void()> finished) const = 0;
    virtual void ShowError(Result error, std::function<void()> finished) const = 0;

    virtual void ShowErrorWithTimestamp(ResultCode error, std::chrono::seconds time,
    virtual void ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
                                        std::function<void()> finished) const = 0;

    virtual void ShowCustomErrorText(ResultCode error, std::string dialog_text,
    virtual void ShowCustomErrorText(Result error, std::string dialog_text,
                                     std::string fullscreen_text,
                                     std::function<void()> finished) const = 0;
};

class DefaultErrorApplet final : public ErrorApplet {
public:
    void ShowError(ResultCode error, std::function<void()> finished) const override;
    void ShowErrorWithTimestamp(ResultCode error, std::chrono::seconds time,
    void ShowError(Result error, std::function<void()> finished) const override;
    void ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
                                std::function<void()> finished) const override;
    void ShowCustomErrorText(ResultCode error, std::string main_text, std::string detail_text,
    void ShowCustomErrorText(Result error, std::string main_text, std::string detail_text,
                             std::function<void()> finished) const override;
};
@@ -19,7 +19,7 @@
namespace IPC {

constexpr ResultCode ERR_REMOTE_PROCESS_DEAD{ErrorModule::HIPC, 301};
constexpr Result ERR_REMOTE_PROCESS_DEAD{ErrorModule::HIPC, 301};

class RequestHelperBase {
protected:

@@ -176,7 +176,7 @@ public:
    void PushImpl(float value);
    void PushImpl(double value);
    void PushImpl(bool value);
    void PushImpl(ResultCode value);
    void PushImpl(Result value);

    template <typename T>
    void Push(T value) {

@@ -251,7 +251,7 @@ void ResponseBuilder::PushRaw(const T& value) {
    index += (sizeof(T) + 3) / 4; // round up to word length
}

inline void ResponseBuilder::PushImpl(ResultCode value) {
inline void ResponseBuilder::PushImpl(Result value) {
    // Result codes are actually 64-bit in the IPC buffer, but only the high part is discarded.
    Push(value.raw);
    Push<u32>(0);

@@ -481,8 +481,8 @@ inline bool RequestParser::Pop() {
}

template <>
inline ResultCode RequestParser::Pop() {
    return ResultCode{Pop<u32>()};
inline Result RequestParser::Pop() {
    return Result{Pop<u32>()};
}

template <typename T>
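Per the `PushImpl(Result)` and `Pop<Result>()` specialisations above, a result occupies two 32-bit words in the command buffer: the raw code followed by a zero high word. A minimal sketch, with a plain vector standing in for the real command buffer:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in Result; only the raw field matters for the buffer layout shown here.
struct Result {
    std::uint32_t raw{};
};

int main() {
    std::vector<std::uint32_t> cmdbuf;

    // ResponseBuilder::PushImpl(Result): raw code, then the zero high word.
    const Result status{0x4A2 /* example raw code */};
    cmdbuf.push_back(status.raw);
    cmdbuf.push_back(0);

    // RequestParser::Pop<Result>() on the receiving side reads the low word back.
    const Result popped{cmdbuf[0]};
    std::cout << std::hex << popped.raw << '\n';
}
```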
@@ -188,8 +188,8 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
    rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
}

ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
                                                                u32_le* src_cmdbuf) {
Result HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
                                                            u32_le* src_cmdbuf) {
    ParseCommandBuffer(handle_table, src_cmdbuf, true);

    if (command_header->IsCloseCommand()) {

@@ -202,7 +202,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTab
    return ResultSuccess;
}

ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
    auto current_offset = handles_offset;
    auto& owner_process = *requesting_thread.GetOwnerProcess();
    auto& handle_table = owner_process.GetHandleTable();

@@ -18,7 +18,7 @@
#include "core/hle/ipc.h"
#include "core/hle/kernel/svc_common.h"

union ResultCode;
union Result;

namespace Core::Memory {
class Memory;

@@ -71,10 +71,10 @@ public:
     * it should be used to differentiate which client (As in ClientSession) we're answering to.
     * TODO(Subv): Use a wrapper structure to hold all the information relevant to
     * this request (ServerSession, Originator thread, Translated command buffer, etc).
     * @returns ResultCode the result code of the translate operation.
     * @returns Result the result code of the translate operation.
     */
    virtual ResultCode HandleSyncRequest(Kernel::KServerSession& session,
                                         Kernel::HLERequestContext& context) = 0;
    virtual Result HandleSyncRequest(Kernel::KServerSession& session,
                                     Kernel::HLERequestContext& context) = 0;

    /**
     * Signals that a client has just connected to this HLE handler and keeps the

@@ -212,11 +212,10 @@ public:
    }

    /// Populates this context with data from the requesting process/thread.
    ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
                                                 u32_le* src_cmdbuf);
    Result PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf);

    /// Writes data from this context back to the requesting process/thread.
    ResultCode WriteToOutgoingCommandBuffer(KThread& requesting_thread);
    Result WriteToOutgoingCommandBuffer(KThread& requesting_thread);

    u32_le GetHipcCommand() const {
        return command;
@@ -90,8 +90,7 @@ public:
    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
        : KThreadQueue(kernel_), m_tree(t) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // If the thread is waiting on an address arbiter, remove it from the tree.
        if (waiting_thread->IsWaitingForAddressArbiter()) {
            m_tree->erase(m_tree->iterator_to(*waiting_thread));

@@ -108,7 +107,7 @@ private:
} // namespace

ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
Result KAddressArbiter::Signal(VAddr addr, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {

@@ -131,7 +130,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
    return ResultSuccess;
}

ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {

@@ -164,7 +163,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
    return ResultSuccess;
}

ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {

@@ -232,9 +231,9 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
    return ResultSuccess;
}

ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

    {

@@ -285,9 +284,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
    return cur_thread->GetWaitResult();
}

ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

    {
@@ -8,7 +8,7 @@
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/svc_types.h"

union ResultCode;
union Result;

namespace Core {
class System;

@@ -25,8 +25,7 @@ public:
    explicit KAddressArbiter(Core::System& system_);
    ~KAddressArbiter();

    [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
                                             s32 count) {
    [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
        switch (type) {
        case Svc::SignalType::Signal:
            return Signal(addr, count);

@@ -39,8 +38,8 @@ public:
        return ResultUnknown;
    }

    [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
                                            s64 timeout) {
    [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
                                        s64 timeout) {
        switch (type) {
        case Svc::ArbitrationType::WaitIfLessThan:
            return WaitIfLessThan(addr, value, false, timeout);

@@ -54,11 +53,11 @@ public:
    }

private:
    [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
    [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
    [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
    [[nodiscard]] Result Signal(VAddr addr, s32 count);
    [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
    [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);

    ThreadTree thread_tree;
@@ -59,8 +59,8 @@ bool KClientPort::IsSignaled() const {
    return num_sessions < max_sessions;
}

ResultCode KClientPort::CreateSession(KClientSession** out,
                                      std::shared_ptr<SessionRequestManager> session_manager) {
Result KClientPort::CreateSession(KClientSession** out,
                                  std::shared_ptr<SessionRequestManager> session_manager) {
    // Reserve a new session from the resource limit.
    KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
                                                   LimitableResource::Sessions);

@@ -53,8 +53,8 @@ public:
    void Destroy() override;
    bool IsSignaled() const override;

    ResultCode CreateSession(KClientSession** out,
                             std::shared_ptr<SessionRequestManager> session_manager = nullptr);
    Result CreateSession(KClientSession** out,
                         std::shared_ptr<SessionRequestManager> session_manager = nullptr);

private:
    std::atomic<s32> num_sessions{};

@@ -21,8 +21,8 @@ void KClientSession::Destroy() {

void KClientSession::OnServerClosed() {}

ResultCode KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                           Core::Timing::CoreTiming& core_timing) {
Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                       Core::Timing::CoreTiming& core_timing) {
    // Signal the server session that new data is available
    return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing);
}

@@ -9,7 +9,7 @@
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"

union ResultCode;
union Result;

namespace Core::Memory {
class Memory;

@@ -46,8 +46,8 @@ public:
        return parent;
    }

    ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                               Core::Timing::CoreTiming& core_timing);
    Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                           Core::Timing::CoreTiming& core_timing);

    void OnServerClosed();
@@ -7,7 +7,7 @@
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"

@@ -19,7 +19,7 @@ namespace Kernel {
KCodeMemory::KCodeMemory(KernelCore& kernel_)
    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}

ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
    // Set members.
    m_owner = kernel.CurrentProcess();

@@ -62,7 +62,7 @@ void KCodeMemory::Finalize() {
    m_owner->Close();
}

ResultCode KCodeMemory::Map(VAddr address, size_t size) {
Result KCodeMemory::Map(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -82,7 +82,7 @@ ResultCode KCodeMemory::Map(VAddr address, size_t size) {
    return ResultSuccess;
}

ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
Result KCodeMemory::Unmap(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -99,7 +99,7 @@ ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
    return ResultSuccess;
}

ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -133,7 +133,7 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
    return ResultSuccess;
}

ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -7,7 +7,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"

@@ -29,13 +29,13 @@ class KCodeMemory final
public:
    explicit KCodeMemory(KernelCore& kernel_);

    ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
    Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
    void Finalize();

    ResultCode Map(VAddr address, size_t size);
    ResultCode Unmap(VAddr address, size_t size);
    ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
    ResultCode UnmapFromOwner(VAddr address, size_t size);
    Result Map(VAddr address, size_t size);
    Result Unmap(VAddr address, size_t size);
    Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
    Result UnmapFromOwner(VAddr address, size_t size);

    bool IsInitialized() const {
        return m_is_initialized;

@@ -53,7 +53,7 @@ public:
    }

private:
    KPageLinkedList m_page_group{};
    KPageGroup m_page_group{};
    KProcess* m_owner{};
    VAddr m_address{};
    KLightLock m_lock;
@@ -61,8 +61,7 @@ public:
    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
        : KThreadQueue(kernel_) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // Remove the thread as a waiter from its owner.
        waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);

@@ -80,8 +79,7 @@ public:
        KernelCore& kernel_, KConditionVariable::ThreadTree* t)
        : KThreadQueue(kernel_), m_tree(t) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // Remove the thread as a waiter from its owner.
        if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(waiting_thread);

@@ -105,8 +103,8 @@ KConditionVariable::KConditionVariable(Core::System& system_)

KConditionVariable::~KConditionVariable() = default;

ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
Result KConditionVariable::SignalToAddress(VAddr addr) {
    KThread* owner_thread = GetCurrentThreadPointer(kernel);

    // Signal the address.
    {

@@ -126,7 +124,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    }

    // Write the value to userspace.
    ResultCode result{ResultSuccess};
    Result result{ResultSuccess};
    if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
        result = ResultSuccess;
    } else {

@@ -146,8 +144,8 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    }
}

ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);

    // Wait for the address.

@@ -261,7 +259,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
    }
}

ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(

@@ -25,12 +25,12 @@ public:
    ~KConditionVariable();

    // Arbitration
    [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
    [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
    [[nodiscard]] Result SignalToAddress(VAddr addr);
    [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);

    // Condition variable
    void Signal(u64 cv_key, s32 count);
    [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
    [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
    void SignalImpl(KThread* thread);
@@ -8,7 +8,7 @@ namespace Kernel {
KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;

ResultCode KHandleTable::Finalize() {
Result KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
@@ -62,7 +62,7 @@ bool KHandleTable::Remove(Handle handle) {
return true;
}

ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);

@@ -85,7 +85,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
return ResultSuccess;
}

ResultCode KHandleTable::Reserve(Handle* out_handle) {
Result KHandleTable::Reserve(Handle* out_handle) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);

@@ -30,7 +30,7 @@ public:
explicit KHandleTable(KernelCore& kernel_);
~KHandleTable();

ResultCode Initialize(s32 size) {
Result Initialize(s32 size) {
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);

// Initialize all fields.
@@ -60,7 +60,7 @@ public:
return m_max_count;
}

ResultCode Finalize();
Result Finalize();
bool Remove(Handle handle);

template <typename T = KAutoObject>
@@ -100,10 +100,10 @@ public:
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}

ResultCode Reserve(Handle* out_handle);
Result Reserve(Handle* out_handle);
void Unreserve(Handle handle);

ResultCode Add(Handle* out_handle, KAutoObject* obj);
Result Add(Handle* out_handle, KAutoObject* obj);
void Register(Handle handle, KAutoObject* obj);

template <typename T>
@@ -15,8 +15,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
return;
}

auto& scheduler = kernel.Scheduler(core_id);
auto& current_thread = *scheduler.GetCurrentThread();
auto& current_thread = GetCurrentThread(kernel);

// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
@@ -26,7 +25,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
process->PinCurrentThread(core_id);

// Set the interrupt flag for the thread.
scheduler.GetCurrentThread()->SetInterruptFlag();
GetCurrentThread(kernel).SetInterruptFlag();
}
}
@@ -17,8 +17,7 @@ public:
bool term)
: KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}

void CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task) override {
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Only process waits if we're allowed to.
if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
return;
@@ -15,8 +15,7 @@ class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}

void CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task) override {
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
@@ -11,7 +11,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"

@@ -208,8 +208,8 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
return allocated_block;
}

ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
Direction dir, bool random) {
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
Direction dir, bool random) {
// Choose a heap based on our page size request.
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, ResultOutOfMemory);
@@ -257,7 +257,7 @@ ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t nu
return ResultSuccess;
}

ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) {
Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
ASSERT(out != nullptr);
ASSERT(out->GetNumPages() == 0);

@@ -293,8 +293,8 @@ ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_page
return ResultSuccess;
}

ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages,
u32 option, u64 process_id, u8 fill_pattern) {
Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
u64 process_id, u8 fill_pattern) {
ASSERT(out != nullptr);
ASSERT(out->GetNumPages() == 0);

@@ -370,12 +370,12 @@ void KMemoryManager::Close(PAddr address, size_t num_pages) {
}
}

void KMemoryManager::Close(const KPageLinkedList& pg) {
void KMemoryManager::Close(const KPageGroup& pg) {
for (const auto& node : pg.Nodes()) {
Close(node.GetAddress(), node.GetNumPages());
}
}
void KMemoryManager::Open(const KPageLinkedList& pg) {
void KMemoryManager::Open(const KPageGroup& pg) {
for (const auto& node : pg.Nodes()) {
Open(node.GetAddress(), node.GetNumPages());
}
@@ -19,7 +19,7 @@ class System;

namespace Kernel {

class KPageLinkedList;
class KPageGroup;

class KMemoryManager final {
public:
@@ -65,17 +65,17 @@ public:
}

PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option);
ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option,
u64 process_id, u8 fill_pattern);
Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
u8 fill_pattern);

static constexpr size_t MaxManagerCount = 10;

void Close(PAddr address, size_t num_pages);
void Close(const KPageLinkedList& pg);
void Close(const KPageGroup& pg);

void Open(PAddr address, size_t num_pages);
void Open(const KPageLinkedList& pg);
void Open(const KPageGroup& pg);

public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -262,8 +262,8 @@ private:
}
}

ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
Direction dir, bool random);
Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
bool random);

private:
Core::System& system;
@@ -12,7 +12,7 @@

namespace Kernel {

class KPageLinkedList final {
class KPageGroup final {
public:
class Node final {
public:
@@ -36,8 +36,8 @@ public:
};

public:
KPageLinkedList() = default;
KPageLinkedList(u64 address, u64 num_pages) {
KPageGroup() = default;
KPageGroup(u64 address, u64 num_pages) {
ASSERT(AddBlock(address, num_pages).IsSuccess());
}

@@ -57,7 +57,7 @@ public:
return num_pages;
}

bool IsEqual(KPageLinkedList& other) const {
bool IsEqual(KPageGroup& other) const {
auto this_node = nodes.begin();
auto other_node = other.nodes.begin();
while (this_node != nodes.end() && other_node != other.nodes.end()) {
@@ -72,7 +72,7 @@ public:
return this_node == nodes.end() && other_node == other.nodes.end();
}

ResultCode AddBlock(u64 address, u64 num_pages) {
Result AddBlock(u64 address, u64 num_pages) {
if (!num_pages) {
return ResultSuccess;
}
@@ -9,7 +9,7 @@
#include "core/hle/kernel/k_address_space_info.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -47,9 +47,9 @@ KPageTable::KPageTable(Core::System& system_)

KPageTable::~KPageTable() = default;

ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
bool enable_aslr, VAddr code_addr,
std::size_t code_size, KMemoryManager::Pool pool) {
Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
VAddr code_addr, std::size_t code_size,
KMemoryManager::Pool pool) {

const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
@@ -65,7 +65,6 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};

ASSERT(start <= code_addr);
ASSERT(code_addr < code_addr + code_size);
ASSERT(code_addr + code_size - 1 <= end - 1);

@@ -258,8 +257,8 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
return InitializeMemoryLayout(start, end);
}

ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
const u64 size{num_pages * PageSize};

// Validate the mapping request.
@@ -272,7 +271,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
KPageLinkedList pg;
KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
&pg, num_pages,
KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
@@ -284,7 +283,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
return ResultSuccess;
}

ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -314,7 +313,7 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
const std::size_t num_pages = size / PageSize;

// Create page groups for the memory being mapped.
KPageLinkedList pg;
KPageGroup pg;
AddRegionToPages(src_address, num_pages, pg);

// Reprotect the source as kernel-read/not mapped.
@@ -345,8 +344,8 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
return ResultSuccess;
}

ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy) {
Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy) {
// Validate the mapping request.
R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
ResultInvalidMemoryRegion);
@@ -490,7 +489,7 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
return address;
}

ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());

const size_t size = num_pages * PageSize;
@@ -542,7 +541,7 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
return ResultSuccess;
}

bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size_t num_pages) {
bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());

const size_t size = num_pages * PageSize;
@@ -631,8 +630,8 @@ bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}

ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
VAddr src_addr) {
KScopedLightLock lk(general_lock);

const std::size_t num_pages{size / PageSize};
@@ -661,7 +660,7 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}

ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

@@ -722,7 +721,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

// Allocate pages for the new memory.
KPageLinkedList pg;
KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
&pg, (size - mapped_size) / PageSize,
KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
@@ -904,7 +903,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
}
}

ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

@@ -973,7 +972,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
}

// Make a page group for the unmap region.
KPageLinkedList pg;
KPageGroup pg;
{
auto& impl = this->PageTableImpl();

@@ -1135,7 +1134,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
return ResultSuccess;
}

ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);

KMemoryState src_state{};
@@ -1148,7 +1147,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
return ResultInvalidCurrentMemory;
}

KPageLinkedList page_linked_list;
KPageGroup page_linked_list;
const std::size_t num_pages{size / PageSize};

AddRegionToPages(src_addr, num_pages, page_linked_list);
@@ -1174,7 +1173,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
return ResultSuccess;
}

ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);

KMemoryState src_state{};
@@ -1189,8 +1188,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
KMemoryPermission::None, KMemoryAttribute::Mask,
KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));

KPageLinkedList src_pages;
KPageLinkedList dst_pages;
KPageGroup src_pages;
KPageGroup dst_pages;
const std::size_t num_pages{size / PageSize};

AddRegionToPages(src_addr, num_pages, src_pages);
@@ -1216,8 +1215,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
return ResultSuccess;
}

ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
KMemoryPermission perm) {
Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
KMemoryPermission perm) {
ASSERT(this->IsLockedByCurrentThread());

VAddr cur_addr{addr};
@@ -1240,8 +1239,8 @@ ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_l
return ResultSuccess;
}

ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list,
KMemoryState state, KMemoryPermission perm) {
Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
KMemoryPermission perm) {
// Check that the map is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
@@ -1264,10 +1263,10 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
return ResultSuccess;
}

ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
PAddr phys_addr, bool is_pa_valid, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm) {
Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
PAddr phys_addr, bool is_pa_valid, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm) {
ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);

// Ensure this is a valid map request.
@@ -1304,7 +1303,7 @@ ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::siz
return ResultSuccess;
}

ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
ASSERT(this->IsLockedByCurrentThread());

VAddr cur_addr{addr};
@@ -1322,8 +1321,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
return ResultSuccess;
}

ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
KMemoryState state) {
Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
// Check that the unmap is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
@@ -1346,7 +1344,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
return ResultSuccess;
}

ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
// Check that the unmap is in range.
const std::size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1370,10 +1368,10 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
return ResultSuccess;
}

ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) {
Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) {
// Ensure that the page group isn't null.
ASSERT(out != nullptr);

@@ -1395,8 +1393,8 @@ ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address,
return ResultSuccess;
}

ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm) {
Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;

// Lock the table.
@@ -1468,7 +1466,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
return QueryInfoImpl(addr);
}

ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
KScopedLightLock lk(general_lock);

KMemoryState state{};
@@ -1486,7 +1484,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
return ResultSuccess;
}

ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);

KMemoryState state{};
@@ -1501,8 +1499,8 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
return ResultSuccess;
}

ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm) {
Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;

// Lock the table.
@@ -1529,7 +1527,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
return ResultSuccess;
}

ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
const size_t num_pages = size / PageSize;
ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
KMemoryAttribute::SetMask);
@@ -1564,7 +1562,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
return ResultSuccess;
}

ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
Result KPageTable::SetMaxHeapSize(std::size_t size) {
// Lock the table.
KScopedLightLock lk(general_lock);

@@ -1576,7 +1574,7 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
return ResultSuccess;
}

ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Lock the physical memory mutex.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

@@ -1643,7 +1641,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

// Allocate pages for the heap extension.
KPageLinkedList pg;
KPageGroup pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
&pg, allocation_size / PageSize,
KMemoryManager::EncodeOption(memory_pool, allocation_option)));
@@ -1718,7 +1716,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
if (is_map_only) {
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
KPageLinkedList page_group;
KPageGroup page_group;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
&page_group, needed_num_pages,
KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
@@ -1730,11 +1728,11 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
return addr;
}

ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);

KMemoryPermission perm{};
if (const ResultCode result{CheckMemoryState(
if (const Result result{CheckMemoryState(
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
@@ -1753,11 +1751,11 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
return ResultSuccess;
}

ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);

KMemoryPermission perm{};
if (const ResultCode result{CheckMemoryState(
if (const Result result{CheckMemoryState(
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
@@ -1776,7 +1774,7 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
return ResultSuccess;
}

ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size) {
Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
return this->LockMemoryAndOpen(
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
@@ -1786,15 +1784,14 @@ ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::
KMemoryAttribute::Locked);
}

ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size,
const KPageLinkedList& pg) {
Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
return this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
}

ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
block_manager = std::make_unique<KMemoryBlockManager>(start, end);

return ResultSuccess;
@@ -1819,7 +1816,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
}

void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
KPageLinkedList& page_linked_list) {
KPageGroup& page_linked_list) {
VAddr addr{start};
while (addr < start + (num_pages * PageSize)) {
const PAddr paddr{GetPhysicalAddr(addr)};
@@ -1838,8 +1835,8 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_page
IsKernel() ? 1 : 4);
}

ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
OperationType operation) {
Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
OperationType operation) {
ASSERT(this->IsLockedByCurrentThread());

ASSERT(Common::IsAligned(addr, PageSize));
@@ -1863,8 +1860,8 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
return ResultSuccess;
}

ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr) {
Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr) {
ASSERT(this->IsLockedByCurrentThread());

ASSERT(num_pages > 0);
@@ -2006,10 +2003,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
}
}

ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
// Validate the states match expectation.
R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory);
R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory);
@@ -2018,12 +2015,11 @@ ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState st
return ResultSuccess;
}

ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm,
KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
ASSERT(this->IsLockedByCurrentThread());

// Get information about the first block.
@@ -2061,12 +2057,12 @@ ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed
return ResultSuccess;
}

ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
VAddr addr, std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
VAddr addr, std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());

// Get information about the first block.
@@ -2123,11 +2119,11 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
return ResultSuccess;
}

ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
// Validate basic preconditions.
ASSERT((lock_attr & attr) == KMemoryAttribute::None);
ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
@@ -2181,11 +2177,11 @@ ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_pad
return ResultSuccess;
}

ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryPermission new_perm,
KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryPermission new_perm,
KMemoryAttribute lock_attr, const KPageGroup* pg) {
// Validate basic preconditions.
ASSERT((attr_mask & lock_attr) == lock_attr);
ASSERT((attr & lock_attr) == lock_attr);
@@ -33,51 +33,49 @@ public:
explicit KPageTable(Core::System& system_);
~KPageTable();

ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
VAddr code_addr, std::size_t code_size,
KMemoryManager::Pool pool);
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
KMemoryPermission perm);
ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy);
ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
VAddr src_addr);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
KMemoryPermission perm);
ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
PAddr phys_addr, KMemoryState state, KMemoryPermission perm) {
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
KMemoryPermission perm);
Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
ICacheInvalidationStrategy icache_invalidation_strategy);
Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
VAddr src_addr);
Result MapPhysicalMemory(VAddr addr, std::size_t size);
Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
KMemoryPermission perm);
Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
KMemoryState state, KMemoryPermission perm) {
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
state, perm);
}
ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
Svc::MemoryPermission svc_perm);
Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
ResultCode SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
ResultCode SetMaxHeapSize(std::size_t size);
ResultCode SetHeapSize(VAddr* out, std::size_t size);
Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
Result ResetTransferMemory(VAddr addr, std::size_t size);
Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
Result SetMaxHeapSize(std::size_t size);
Result SetHeapSize(VAddr* out, std::size_t size);
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm, PAddr map_addr = 0);
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageLinkedList& pg);
ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr);
Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr);

Common::PageTable& PageTableImpl() {
return page_table_impl;
@@ -102,83 +100,78 @@ private:
KMemoryAttribute::IpcLocked |
KMemoryAttribute::DeviceShared;

ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
KMemoryPermission perm);
ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
PAddr phys_addr, bool is_pa_valid, VAddr region_start,
std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
Result InitializeMemoryLayout(VAddr start, VAddr end);
Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
KMemoryState state, KMemoryPermission perm);
Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
std::size_t align);
ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
OperationType operation);
ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr = 0);
Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
OperationType operation);
Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr = 0);
VAddr GetRegionAddress(KMemoryState state) const;
std::size_t GetRegionSize(KMemoryState state) const;

VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
std::size_t alignment, std::size_t offset, std::size_t guard_pages);

ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const;
ResultCode CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const {
return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
perm, attr_mask, attr);
}

ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr) const;
ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
VAddr addr, std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
ResultCode CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
std::size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
}
ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr);
}

ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr);
ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr,
const KPageLinkedList* pg);
Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr);
Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryPermission new_perm, KMemoryAttribute lock_attr,
const KPageGroup* pg);

ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
bool IsValidPageGroup(const KPageLinkedList& pg, VAddr addr, size_t num_pages);
Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);

bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();
@@ -50,7 +50,7 @@ bool KPort::IsServerClosed() const {
return state == State::ServerClosed;
}

ResultCode KPort::EnqueueSession(KServerSession* session) {
Result KPort::EnqueueSession(KServerSession* session) {
KScopedSchedulerLock sl{kernel};

R_UNLESS(state == State::Normal, ResultPortClosed);

@@ -34,7 +34,7 @@ public:

bool IsServerClosed() const;

ResultCode EnqueueSession(KServerSession* session);
Result EnqueueSession(KServerSession* session);

KClientPort& GetClientPort() {
return client;
@@ -67,8 +67,8 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
|
||||
ProcessType type, KResourceLimit* res_limit) {
|
||||
Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
|
||||
ProcessType type, KResourceLimit* res_limit) {
|
||||
auto& kernel = system.Kernel();
|
||||
|
||||
process->name = std::move(process_name);
|
||||
@@ -176,7 +176,8 @@ void KProcess::PinCurrentThread(s32 core_id) {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// Get the current thread.
|
||||
KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
|
||||
KThread* cur_thread =
|
||||
kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
|
||||
|
||||
// If the thread isn't terminated, pin it.
|
||||
if (!cur_thread->IsTerminationRequested()) {
|
||||
@@ -193,7 +194,8 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// Get the current thread.
|
||||
KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
|
||||
KThread* cur_thread =
|
||||
kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
|
||||
|
||||
// Unpin it.
|
||||
cur_thread->Unpin();
|
||||
@@ -217,8 +219,8 @@ void KProcess::UnpinThread(KThread* thread) {
|
||||
KScheduler::SetSchedulerUpdateNeeded(kernel);
|
||||
}
|
||||
|
||||
ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
|
||||
[[maybe_unused]] size_t size) {
|
||||
Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
|
||||
[[maybe_unused]] size_t size) {
|
||||
// Lock ourselves, to prevent concurrent access.
|
||||
KScopedLightLock lk(state_lock);
|
||||
|
||||
@@ -282,7 +284,7 @@ void KProcess::UnregisterThread(KThread* thread) {
|
||||
thread_list.remove(thread);
|
||||
}
|
||||
|
||||
ResultCode KProcess::Reset() {
|
||||
Result KProcess::Reset() {
|
||||
// Lock the process and the scheduler.
|
||||
KScopedLightLock lk(state_lock);
|
||||
KScopedSchedulerLock sl{kernel};
|
||||
@@ -296,7 +298,7 @@ ResultCode KProcess::Reset() {
return ResultSuccess;
}

-ResultCode KProcess::SetActivity(ProcessActivity activity) {
+Result KProcess::SetActivity(ProcessActivity activity) {
// Lock ourselves and the scheduler.
KScopedLightLock lk{state_lock};
KScopedLightLock list_lk{list_lock};
@@ -340,8 +342,7 @@ ResultCode KProcess::SetActivity(ProcessActivity activity) {
return ResultSuccess;
}

-ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
-std::size_t code_size) {
+Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
program_id = metadata.GetTitleID();
ideal_core = metadata.GetMainThreadCore();
is_64bit_process = metadata.Is64BitProgram();
@@ -356,24 +357,24 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
return ResultLimitReached;
}
// Initialize proces address space
-if (const ResultCode result{
-page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
-code_size, KMemoryManager::Pool::Application)};
+if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
+0x8000000, code_size,
+KMemoryManager::Pool::Application)};
result.IsError()) {
return result;
}

// Map process code region
-if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
-code_size / PageSize, KMemoryState::Code,
-KMemoryPermission::None)};
+if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
+code_size / PageSize, KMemoryState::Code,
+KMemoryPermission::None)};
result.IsError()) {
return result;
}

// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
-if (const ResultCode result{
+if (const Result result{
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
result.IsError()) {
return result;
@@ -420,11 +421,11 @@ void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);

const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
-for (auto& thread : in_thread_list) {
+for (auto* thread : in_thread_list) {
if (thread->GetOwnerProcess() != this)
continue;

-if (thread == kernel.CurrentScheduler()->GetCurrentThread())
+if (thread == GetCurrentThreadPointer(kernel))
continue;

// TODO(Subv): When are the other running/ready threads terminated?
@@ -480,7 +481,7 @@ void KProcess::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
}

-ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
+Result KProcess::CreateThreadLocalRegion(VAddr* out) {
KThreadLocalPage* tlp = nullptr;
VAddr tlr = 0;

@@ -531,7 +532,7 @@ ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
return ResultSuccess;
}

-ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
+Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
KThreadLocalPage* page_to_free = nullptr;

// Release the region.
@@ -662,7 +663,7 @@ void KProcess::ChangeStatus(ProcessStatus new_status) {
NotifyAvailable();
}

-ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
+Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size);

// The kernel always ensures that the given stack size is page aligned.
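The hunks above all follow one shape: a function now returns Result and bails out early whenever an intermediate Result reports an error. A minimal, self-contained sketch of that early-return style — the Result struct, ResultFailure value and DoStep helper below are simplified stand-ins for illustration, not the actual yuzu types:

#include <cstdint>

// Simplified stand-in for the kernel's Result type (illustrative only).
struct Result {
    uint32_t raw{0};
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultFailure{1}; // hypothetical error value

// Hypothetical step standing in for calls such as page_table->InitializeForProcess(...).
Result DoStep(bool ok) {
    return ok ? ResultSuccess : ResultFailure;
}

Result LoadSomething(bool step_a_ok, bool step_b_ok) {
    if (const Result result{DoStep(step_a_ok)}; result.IsError()) {
        return result; // propagate the first failure, as the hunks above do
    }
    if (const Result result{DoStep(step_b_ok)}; result.IsError()) {
        return result;
    }
    return ResultSuccess;
}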
@@ -110,8 +110,8 @@ public:

static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;

-static ResultCode Initialize(KProcess* process, Core::System& system, std::string process_name,
-ProcessType type, KResourceLimit* res_limit);
+static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
+ProcessType type, KResourceLimit* res_limit);

/// Gets a reference to the process' page table.
KPageTable& PageTable() {
@@ -133,11 +133,11 @@ public:
return handle_table;
}

-ResultCode SignalToAddress(VAddr address) {
+Result SignalToAddress(VAddr address) {
return condition_var.SignalToAddress(address);
}

-ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
+Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
return condition_var.WaitForAddress(handle, address, tag);
}

@@ -145,17 +145,16 @@ public:
return condition_var.Signal(cv_key, count);
}

-ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
return condition_var.Wait(address, cv_key, tag, ns);
}

-ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
-s32 count) {
+Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
return address_arbiter.SignalToAddress(address, signal_type, value, count);
}

-ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
-s64 timeout) {
+Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+s64 timeout) {
return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
}

@@ -322,7 +321,7 @@ public:
/// @pre The process must be in a signaled state. If this is called on a
/// process instance that is not signaled, ERR_INVALID_STATE will be
/// returned.
-ResultCode Reset();
+Result Reset();

/**
* Loads process-specifics configuration info with metadata provided
@@ -333,7 +332,7 @@ public:
* @returns ResultSuccess if all relevant metadata was able to be
* loaded and parsed. Otherwise, an error code is returned.
*/
-ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
+Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);

/**
* Starts the main application thread for this process.
@@ -367,7 +366,7 @@ public:

void DoWorkerTaskImpl();

-ResultCode SetActivity(ProcessActivity activity);
+Result SetActivity(ProcessActivity activity);

void PinCurrentThread(s32 core_id);
void UnpinCurrentThread(s32 core_id);
@@ -377,17 +376,17 @@ public:
return state_lock;
}

-ResultCode AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
+Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);

///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management

// Marks the next available region as used and returns the address of the slot.
-[[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out);
+[[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);

// Frees a used TLS slot identified by the given address
-ResultCode DeleteThreadLocalRegion(VAddr addr);
+Result DeleteThreadLocalRegion(VAddr addr);

///////////////////////////////////////////////////////////////////////////////////////////////
// Debug watchpoint management
@@ -423,7 +422,7 @@ private:
void ChangeStatus(ProcessStatus new_status);

/// Allocates the main thread stack for the process, given the stack size in bytes.
-ResultCode AllocateMainThreadStack(std::size_t stack_size);
+Result AllocateMainThreadStack(std::size_t stack_size);

/// Memory manager for this process
std::unique_ptr<KPageTable> page_table;
@@ -27,7 +27,7 @@ void KReadableEvent::Destroy() {
}
}

-ResultCode KReadableEvent::Signal() {
+Result KReadableEvent::Signal() {
KScopedSchedulerLock lk{kernel};

if (!is_signaled) {
@@ -38,13 +38,13 @@ ResultCode KReadableEvent::Signal() {
return ResultSuccess;
}

-ResultCode KReadableEvent::Clear() {
+Result KReadableEvent::Clear() {
Reset();

return ResultSuccess;
}

-ResultCode KReadableEvent::Reset() {
+Result KReadableEvent::Reset() {
KScopedSchedulerLock lk{kernel};

if (!is_signaled) {

@@ -33,9 +33,9 @@ public:
bool IsSignaled() const override;
void Destroy() override;

-ResultCode Signal();
-ResultCode Clear();
-ResultCode Reset();
+Result Signal();
+Result Clear();
+Result Reset();

private:
bool is_signaled{};
@@ -73,7 +73,7 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
return value;
}

-ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
+Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
const auto index = static_cast<std::size_t>(which);
KScopedLightLock lk(lock);
R_UNLESS(current_values[index] <= value, ResultInvalidState);

@@ -8,7 +8,7 @@
#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"

-union ResultCode;
+union Result;

namespace Core::Timing {
class CoreTiming;
@@ -46,7 +46,7 @@ public:
s64 GetPeakValue(LimitableResource which) const;
s64 GetFreeValue(LimitableResource which) const;

-ResultCode SetLimitValue(LimitableResource which, s64 value);
+Result SetLimitValue(LimitableResource which, s64 value);

bool Reserve(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value, s64 timeout);
@@ -317,7 +317,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {

{
KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id);
-if (best_thread == GetCurrentThread()) {
+if (best_thread == GetCurrentThreadPointer(kernel)) {
best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread);
}

@@ -424,7 +424,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);

// Get the current thread and process.
-KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();

// If the thread's yield count matches, there's nothing for us to do.
@@ -463,7 +463,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);

// Get the current thread and process.
-KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();

// If the thread's yield count matches, there's nothing for us to do.
@@ -551,7 +551,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);

// Get the current thread and process.
-KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();

// If the thread's yield count matches, there's nothing for us to do.
@@ -642,7 +642,7 @@ KScheduler::~KScheduler() {
ASSERT(!idle_thread);
}

-KThread* KScheduler::GetCurrentThread() const {
+KThread* KScheduler::GetSchedulerCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
}
@@ -654,7 +654,7 @@ u64 KScheduler::GetLastContextSwitchTicks() const {
}

void KScheduler::RescheduleCurrentCore() {
-ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
+ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);

auto& phys_core = system.Kernel().PhysicalCore(core_id);
if (phys_core.IsInterrupted()) {
@@ -665,7 +665,7 @@ void KScheduler::RescheduleCurrentCore() {
if (state.needs_scheduling.load()) {
Schedule();
} else {
-GetCurrentThread()->EnableDispatch();
+GetCurrentThread(system.Kernel()).EnableDispatch();
guard.Unlock();
}
}
@@ -718,13 +718,18 @@ void KScheduler::Reload(KThread* thread) {

void KScheduler::SwitchContextStep2() {
// Load context of new thread
-Reload(GetCurrentThread());
+Reload(GetCurrentThreadPointer(system.Kernel()));

RescheduleCurrentCore();
}

+void KScheduler::Schedule() {
+ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
+this->ScheduleImpl();
+}
+
void KScheduler::ScheduleImpl() {
-KThread* previous_thread = GetCurrentThread();
+KThread* previous_thread = GetCurrentThreadPointer(system.Kernel());
KThread* next_thread = state.highest_priority_thread;

state.needs_scheduling.store(false);
@@ -762,6 +767,7 @@ void KScheduler::ScheduleImpl() {
old_context = &previous_thread->GetHostContext();

// Set the new thread.
+SetCurrentThread(system.Kernel(), next_thread);
current_thread.store(next_thread);

guard.Unlock();
@@ -805,6 +811,7 @@ void KScheduler::SwitchToCurrent() {
}
}
auto thread = next_thread ? next_thread : idle_thread;
+SetCurrentThread(system.Kernel(), thread);
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
} while (!is_switch_pending());
}
@@ -48,7 +48,7 @@ public:
void Reload(KThread* thread);

/// Gets the current running thread
-[[nodiscard]] KThread* GetCurrentThread() const;
+[[nodiscard]] KThread* GetSchedulerCurrentThread() const;

/// Gets the idle thread
[[nodiscard]] KThread* GetIdleThread() const {
@@ -57,7 +57,7 @@ public:

/// Returns true if the scheduler is idle
[[nodiscard]] bool IsIdle() const {
-return GetCurrentThread() == idle_thread;
+return GetSchedulerCurrentThread() == idle_thread;
}

/// Gets the timestamp for the last context switch in ticks.
@@ -149,10 +149,7 @@ private:

void RotateScheduledQueue(s32 cpu_core_id, s32 priority);

-void Schedule() {
-ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
-this->ScheduleImpl();
-}
+void Schedule();

/// Switches the CPU's active thread context to that of the specified thread
void ScheduleImpl();
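The scheduler hunks distinguish two notions of "current thread": the free functions GetCurrentThread(kernel)/GetCurrentThreadPointer(kernel), which report the thread the emulated kernel as a whole considers current, and the per-core accessor renamed to GetSchedulerCurrentThread(). A rough sketch of how the two coexist after the rename, with simplified types rather than the actual yuzu declarations:

class KThread;
class KernelCore;

// Kernel-wide view: the thread the emulated kernel currently considers running.
KThread* GetCurrentThreadPointer(KernelCore& kernel);
KThread& GetCurrentThread(KernelCore& kernel);

class KScheduler {
public:
    // Per-core view: the thread this scheduler instance last switched to.
    // Renamed so it no longer collides with the free function above.
    KThread* GetSchedulerCurrentThread() const {
        return current_thread;
    }

private:
    KThread* current_thread{};
};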
@@ -79,7 +79,7 @@ std::size_t KServerSession::NumDomainRequestHandlers() const {
return manager->DomainHandlerCount();
}

-ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
+Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
if (!context.HasDomainMessageHeader()) {
return ResultSuccess;
}
@@ -123,7 +123,7 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
return ResultSuccess;
}

-ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
+Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);

@@ -143,8 +143,8 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
return ResultSuccess;
}

-ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
-ResultCode result = ResultSuccess;
+Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
+Result result = ResultSuccess;

// If the session has been converted to a domain, handle the domain request
if (manager->HasSessionRequestHandler(context)) {
@@ -173,8 +173,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
return result;
}

-ResultCode KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
-Core::Timing::CoreTiming& core_timing) {
+Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+Core::Timing::CoreTiming& core_timing) {
return QueueSyncRequest(thread, memory);
}

@@ -73,10 +73,10 @@ public:
* @param memory Memory context to handle the sync request under.
* @param core_timing Core timing context to schedule the request event under.
*
-* @returns ResultCode from the operation.
+* @returns Result from the operation.
*/
-ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
-Core::Timing::CoreTiming& core_timing);
+Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+Core::Timing::CoreTiming& core_timing);

/// Adds a new domain request handler to the collection of request handlers within
/// this ServerSession instance.
@@ -103,14 +103,14 @@ public:

private:
/// Queues a sync request from the emulated application.
-ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
+Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);

/// Completes a sync request from the emulated application.
-ResultCode CompleteSyncRequest(HLERequestContext& context);
+Result CompleteSyncRequest(HLERequestContext& context);

/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
/// object handle.
-ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
+Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);

/// This session's HLE request handlers
std::shared_ptr<SessionRequestManager> manager;
@@ -18,12 +18,10 @@ KSharedMemory::~KSharedMemory() {
kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
}

-ResultCode KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
-KPageLinkedList&& page_list_,
-Svc::MemoryPermission owner_permission_,
-Svc::MemoryPermission user_permission_,
-PAddr physical_address_, std::size_t size_,
-std::string name_) {
+Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+Svc::MemoryPermission user_permission_, PAddr physical_address_,
+std::size_t size_, std::string name_) {
// Set members.
owner_process = owner_process_;
device_memory = &device_memory_;
@@ -67,8 +65,8 @@ void KSharedMemory::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
}

-ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
-Svc::MemoryPermission permissions) {
+Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
+Svc::MemoryPermission permissions) {
const u64 page_count{(map_size + PageSize - 1) / PageSize};

if (page_list.GetNumPages() != page_count) {
@@ -86,7 +84,7 @@ ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size
ConvertToKMemoryPermission(permissions));
}

-ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
+Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
const u64 page_count{(unmap_size + PageSize - 1) / PageSize};

if (page_list.GetNumPages() != page_count) {

@@ -9,7 +9,7 @@
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -26,10 +26,10 @@ public:
explicit KSharedMemory(KernelCore& kernel_);
~KSharedMemory() override;

-ResultCode Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
-KPageLinkedList&& page_list_, Svc::MemoryPermission owner_permission_,
-Svc::MemoryPermission user_permission_, PAddr physical_address_,
-std::size_t size_, std::string name_);
+Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+Svc::MemoryPermission user_permission_, PAddr physical_address_,
+std::size_t size_, std::string name_);

/**
* Maps a shared memory block to an address in the target process' address space
@@ -38,8 +38,8 @@ public:
* @param map_size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field)
*/
-ResultCode Map(KProcess& target_process, VAddr address, std::size_t map_size,
-Svc::MemoryPermission permissions);
+Result Map(KProcess& target_process, VAddr address, std::size_t map_size,
+Svc::MemoryPermission permissions);

/**
* Unmaps a shared memory block from an address in the target process' address space
@@ -47,7 +47,7 @@ public:
* @param address Address in system memory to unmap shared memory block
* @param unmap_size Size of the shared memory block to unmap
*/
-ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
+Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);

/**
* Gets a pointer to the shared memory block
@@ -77,7 +77,7 @@ public:
private:
Core::DeviceMemory* device_memory;
KProcess* owner_process{};
-KPageLinkedList page_list;
+KPageGroup page_list;
Svc::MemoryPermission owner_permission{};
Svc::MemoryPermission user_permission{};
PAddr physical_address{};
@@ -22,7 +22,7 @@ public:
: KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}

void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
-ResultCode wait_result) override {
+Result wait_result) override {
// Determine the sync index, and unlink all nodes.
s32 sync_index = -1;
for (auto i = 0; i < m_count; ++i) {
@@ -45,8 +45,7 @@ public:
KThreadQueue::EndWait(waiting_thread, wait_result);
}

-void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-bool cancel_timer_task) override {
+void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove all nodes from our list.
for (auto i = 0; i < m_count; ++i) {
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
@@ -72,9 +71,9 @@ void KSynchronizationObject::Finalize() {
KAutoObject::Finalize();
}

-ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
-KSynchronizationObject** objects, const s32 num_objects,
-s64 timeout) {
+Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
+KSynchronizationObject** objects, const s32 num_objects,
+s64 timeout) {
// Allocate space on stack for thread nodes.
std::vector<ThreadListNode> thread_nodes(num_objects);

@@ -148,7 +147,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)

KSynchronizationObject::~KSynchronizationObject() = default;

-void KSynchronizationObject::NotifyAvailable(ResultCode result) {
+void KSynchronizationObject::NotifyAvailable(Result result) {
KScopedSchedulerLock sl(kernel);

// If we're not signaled, we've nothing to notify.

@@ -24,9 +24,9 @@ public:
KThread* thread{};
};

-[[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
-KSynchronizationObject** objects, const s32 num_objects,
-s64 timeout);
+[[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index,
+KSynchronizationObject** objects, const s32 num_objects,
+s64 timeout);

void Finalize() override;

@@ -72,7 +72,7 @@ protected:

virtual void OnFinalizeSynchronizationObject() {}

-void NotifyAvailable(ResultCode result);
+void NotifyAvailable(Result result);
void NotifyAvailable() {
return this->NotifyAvailable(ResultSuccess);
}
@@ -80,8 +80,7 @@ public:
explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
: KThreadQueue(kernel_), m_wait_list(wl) {}

-void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-bool cancel_timer_task) override {
+void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread from the wait list.
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

@@ -99,8 +98,8 @@ KThread::KThread(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
KThread::~KThread() = default;

-ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
-s32 virt_core, KProcess* owner, ThreadType type) {
+Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
+s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -245,10 +244,10 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
return ResultSuccess;
}

-ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
-VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
-ThreadType type, std::function<void(void*)>&& init_func,
-void* init_func_parameter) {
+Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
+VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
+ThreadType type, std::function<void(void*)>&& init_func,
+void* init_func_parameter) {
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));

@@ -260,27 +259,26 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
return ResultSuccess;
}

-ResultCode KThread::InitializeDummyThread(KThread* thread) {
+Result KThread::InitializeDummyThread(KThread* thread) {
return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
}

-ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
+Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
Core::CpuManager::GetIdleThreadStartFunc(),
system.GetCpuManager().GetStartFuncParameter());
}

-ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
-KThreadFunction func, uintptr_t arg,
-s32 virt_core) {
+Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
+KThreadFunction func, uintptr_t arg, s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
Core::CpuManager::GetShutdownThreadStartFunc(),
system.GetCpuManager().GetStartFuncParameter());
}

-ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
-KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
-s32 prio, s32 virt_core, KProcess* owner) {
+Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
+uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
+KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
@@ -382,7 +380,7 @@ void KThread::FinishTermination() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
-core_thread = kernel.Scheduler(i).GetCurrentThread();
+core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@@ -523,7 +521,7 @@ void KThread::ClearInterruptFlag() {
memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
}

-ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};

// Get the virtual mask.
@@ -533,7 +531,7 @@ ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
return ResultSuccess;
}

-ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};
ASSERT(num_core_migration_disables >= 0);

@@ -549,7 +547,7 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
return ResultSuccess;
}

-ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
+Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
ASSERT(parent != nullptr);
ASSERT(v_affinity_mask != 0);
KScopedLightLock lk(activity_pause_lock);
@@ -631,7 +629,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
++thread_core) {
-if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
+if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -748,7 +746,20 @@ void KThread::Continue() {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

-ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
+void KThread::WaitUntilSuspended() {
+// Make sure we have a suspend requested.
+ASSERT(IsSuspendRequested());
+
+// Loop until the thread is not executing on any core.
+for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
+KThread* core_thread{};
+do {
+core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
+} while (core_thread == this);
+}
+}
+
+Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Lock ourselves.
KScopedLightLock lk(activity_pause_lock);

@@ -809,7 +820,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Check if the thread is currently running.
// If it is, we'll need to retry.
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
-if (kernel.Scheduler(i).GetCurrentThread() == this) {
+if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -821,7 +832,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
return ResultSuccess;
}

-ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
+Result KThread::GetThreadContext3(std::vector<u8>& out) {
// Lock ourselves.
KScopedLightLock lk{activity_pause_lock};

@@ -986,7 +997,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
return next_lock_owner;
}

-ResultCode KThread::Run() {
+Result KThread::Run() {
while (true) {
KScopedSchedulerLock lk{kernel};

@@ -1047,7 +1058,7 @@ void KThread::Exit() {
}
}

-ResultCode KThread::Sleep(s64 timeout) {
+Result KThread::Sleep(s64 timeout) {
ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
ASSERT(this == GetCurrentThreadPointer(kernel));
ASSERT(timeout > 0);
@@ -1103,7 +1114,7 @@ void KThread::BeginWait(KThreadQueue* queue) {
wait_queue = queue;
}

-void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
+void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);

@@ -1113,7 +1124,7 @@ void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCod
}
}

-void KThread::EndWait(ResultCode wait_result_) {
+void KThread::EndWait(Result wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);

@@ -1132,7 +1143,7 @@ void KThread::EndWait(ResultCode wait_result_) {
}
}

-void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
+void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);

@@ -1162,6 +1173,10 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context;
}

+void SetCurrentThread(KernelCore& kernel, KThread* thread) {
+kernel.SetCurrentEmuThread(thread);
+}
+
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
return kernel.GetCurrentEmuThread();
}
@@ -106,6 +106,7 @@ enum class StepState : u32 {
StepPerformed, ///< Thread has stepped, waiting to be scheduled again
};

+void SetCurrentThread(KernelCore& kernel, KThread* thread);
[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
@@ -175,7 +176,7 @@ public:

void SetBasePriority(s32 value);

-[[nodiscard]] ResultCode Run();
+[[nodiscard]] Result Run();

void Exit();

@@ -207,6 +208,8 @@ public:

void Continue();

+void WaitUntilSuspended();
+
constexpr void SetSyncedIndex(s32 index) {
synced_index = index;
}
@@ -215,11 +218,11 @@ public:
return synced_index;
}

-constexpr void SetWaitResult(ResultCode wait_res) {
+constexpr void SetWaitResult(Result wait_res) {
wait_result = wait_res;
}

-[[nodiscard]] constexpr ResultCode GetWaitResult() const {
+[[nodiscard]] constexpr Result GetWaitResult() const {
return wait_result;
}

@@ -342,15 +345,15 @@ public:
return physical_affinity_mask;
}

-[[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+[[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);

-[[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+[[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);

-[[nodiscard]] ResultCode SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
+[[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);

-[[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
+[[nodiscard]] Result SetActivity(Svc::ThreadActivity activity);

-[[nodiscard]] ResultCode Sleep(s64 timeout);
+[[nodiscard]] Result Sleep(s64 timeout);

[[nodiscard]] s64 GetYieldScheduleCount() const {
return schedule_count;
@@ -408,20 +411,19 @@ public:

static void PostDestroy(uintptr_t arg);

-[[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
+[[nodiscard]] static Result InitializeDummyThread(KThread* thread);

-[[nodiscard]] static ResultCode InitializeIdleThread(Core::System& system, KThread* thread,
-s32 virt_core);
+[[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread,
+s32 virt_core);

-[[nodiscard]] static ResultCode InitializeHighPriorityThread(Core::System& system,
-KThread* thread,
-KThreadFunction func,
-uintptr_t arg, s32 virt_core);
+[[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
+KThreadFunction func, uintptr_t arg,
+s32 virt_core);

-[[nodiscard]] static ResultCode InitializeUserThread(Core::System& system, KThread* thread,
-KThreadFunction func, uintptr_t arg,
-VAddr user_stack_top, s32 prio,
-s32 virt_core, KProcess* owner);
+[[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread,
+KThreadFunction func, uintptr_t arg,
+VAddr user_stack_top, s32 prio, s32 virt_core,
+KProcess* owner);

public:
struct StackParameters {
@@ -607,7 +609,7 @@ public:

void RemoveWaiter(KThread* thread);

-[[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
+[[nodiscard]] Result GetThreadContext3(std::vector<u8>& out);

[[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);

@@ -633,9 +635,9 @@ public:
}

void BeginWait(KThreadQueue* queue);
-void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
-void EndWait(ResultCode wait_result_);
-void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
+void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_);
+void EndWait(Result wait_result_);
+void CancelWait(Result wait_result_, bool cancel_timer_task);

[[nodiscard]] bool HasWaiters() const {
return !waiter_list.empty();
@@ -721,14 +723,14 @@ private:

void FinishTermination();

-[[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
-s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
+[[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
+s32 prio, s32 virt_core, KProcess* owner, ThreadType type);

-[[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
-uintptr_t arg, VAddr user_stack_top, s32 prio,
-s32 core, KProcess* owner, ThreadType type,
-std::function<void(void*)>&& init_func,
-void* init_func_parameter);
+[[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func,
+uintptr_t arg, VAddr user_stack_top, s32 prio,
+s32 core, KProcess* owner, ThreadType type,
+std::function<void(void*)>&& init_func,
+void* init_func_parameter);

static void RestorePriority(KernelCore& kernel_ctx, KThread* thread);

@@ -765,7 +767,7 @@ private:
u32 suspend_request_flags{};
u32 suspend_allowed_flags{};
s32 synced_index{};
-ResultCode wait_result{ResultSuccess};
+Result wait_result{ResultSuccess};
s32 base_priority{};
s32 physical_ideal_core_id{};
s32 virtual_ideal_core_id{};
@@ -13,7 +13,7 @@

namespace Kernel {

-ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
+Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
// Set that this process owns us.
m_owner = process;
m_kernel = &kernel;
@@ -35,7 +35,7 @@ ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
return ResultSuccess;
}

-ResultCode KThreadLocalPage::Finalize() {
+Result KThreadLocalPage::Finalize() {
// Get the physical address of the page.
const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
ASSERT(phys_addr);

@@ -34,8 +34,8 @@ public:
return m_virt_addr;
}

-ResultCode Initialize(KernelCore& kernel, KProcess* process);
-ResultCode Finalize();
+Result Initialize(KernelCore& kernel, KProcess* process);
+Result Finalize();

VAddr Reserve();
void Release(VAddr addr);
@@ -9,9 +9,9 @@ namespace Kernel {

void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
[[maybe_unused]] KSynchronizationObject* signaled_object,
-[[maybe_unused]] ResultCode wait_result) {}
+[[maybe_unused]] Result wait_result) {}

-void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
+void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);

@@ -25,8 +25,7 @@ void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
}

-void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
-bool cancel_timer_task) {
+void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);

@@ -43,6 +42,6 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
}

void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
-[[maybe_unused]] ResultCode wait_result) {}
+[[maybe_unused]] Result wait_result) {}

} // namespace Kernel

@@ -14,10 +14,9 @@ public:
virtual ~KThreadQueue() = default;

virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
-ResultCode wait_result);
-virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
-virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-bool cancel_timer_task);
+Result wait_result);
+virtual void EndWait(KThread* waiting_thread, Result wait_result);
+virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task);

private:
KernelCore& kernel;
@@ -28,7 +27,7 @@ class KThreadQueueWithoutEndWait : public KThreadQueue {
public:
explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}

-void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
+void EndWait(KThread* waiting_thread, Result wait_result) override final;
};

} // namespace Kernel
@@ -13,8 +13,8 @@ KTransferMemory::KTransferMemory(KernelCore& kernel_)

KTransferMemory::~KTransferMemory() = default;

-ResultCode KTransferMemory::Initialize(VAddr address_, std::size_t size_,
-Svc::MemoryPermission owner_perm_) {
+Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
+Svc::MemoryPermission owner_perm_) {
// Set members.
owner = kernel.CurrentProcess();

@@ -7,7 +7,7 @@
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"

-union ResultCode;
+union Result;

namespace Core::Memory {
class Memory;
@@ -26,7 +26,7 @@ public:
explicit KTransferMemory(KernelCore& kernel_);
~KTransferMemory() override;

-ResultCode Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
+Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);

void Finalize() override;
@@ -18,11 +18,11 @@ void KWritableEvent::Initialize(KEvent* parent_event_, std::string&& name_) {
parent->GetReadableEvent().Open();
}

-ResultCode KWritableEvent::Signal() {
+Result KWritableEvent::Signal() {
return parent->GetReadableEvent().Signal();
}

-ResultCode KWritableEvent::Clear() {
+Result KWritableEvent::Clear() {
return parent->GetReadableEvent().Clear();
}

@@ -25,8 +25,8 @@ public:
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}

void Initialize(KEvent* parent_, std::string&& name_);
-ResultCode Signal();
-ResultCode Clear();
+Result Signal();
+Result Clear();

KEvent* GetParent() const {
return parent;
@@ -331,6 +331,8 @@ struct KernelCore::Impl {
return is_shutting_down.load(std::memory_order_relaxed);
}

+static inline thread_local KThread* current_thread{nullptr};
+
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
@@ -341,7 +343,12 @@ struct KernelCore::Impl {
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
}
-return schedulers[thread_id]->GetCurrentThread();
+return current_thread;
}

+void SetCurrentEmuThread(KThread* thread) {
+current_thread = thread;
+}
+
void DeriveInitialMemoryLayout() {
@@ -1024,6 +1031,10 @@ KThread* KernelCore::GetCurrentEmuThread() const {
return impl->GetCurrentEmuThread();
}

+void KernelCore::SetCurrentEmuThread(KThread* thread) {
+impl->SetCurrentEmuThread(thread);
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
@@ -1078,6 +1089,13 @@ void KernelCore::Suspend(bool suspended) {

for (auto* process : GetProcessList()) {
process->SetActivity(activity);
+
+if (should_suspend) {
+// Wait for execution to stop
+for (auto* thread : process->GetThreadList()) {
+thread->WaitUntilSuspended();
+}
+}
}
}

@@ -226,6 +226,9 @@ public:
/// Gets the current host_thread/guest_thread pointer.
KThread* GetCurrentEmuThread() const;

+/// Sets the current guest_thread pointer.
+void SetCurrentEmuThread(KThread* thread);
+
/// Gets the current host_thread handle.
u32 GetCurrentHostThreadID() const;
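The kernel.cpp and kernel.h hunks above stop deriving the current emulated thread from the per-core scheduler and instead track it in a thread_local pointer with an explicit setter. A simplified, self-contained sketch of that mechanism (the real KernelCore holds far more state behind an Impl struct):

#include <cassert>

struct KThread {};

class KernelCore {
public:
    // Each host thread records the emulated thread it is currently running.
    void SetCurrentEmuThread(KThread* thread) {
        current_thread = thread;
    }
    KThread* GetCurrentEmuThread() const {
        return current_thread;
    }

private:
    static inline thread_local KThread* current_thread{nullptr};
};

int main() {
    KernelCore kernel;
    KThread thread;
    kernel.SetCurrentEmuThread(&thread);
    assert(kernel.GetCurrentEmuThread() == &thread);
}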
@@ -68,9 +68,9 @@ u32 GetFlagBitOffset(CapabilityType type) {

} // Anonymous namespace

-ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
-std::size_t num_capabilities,
-KPageTable& page_table) {
+Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
+std::size_t num_capabilities,
+KPageTable& page_table) {
Clear();

// Allow all cores and priorities.
@@ -81,9 +81,9 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti
return ParseCapabilities(capabilities, num_capabilities, page_table);
}

-ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
-std::size_t num_capabilities,
-KPageTable& page_table) {
+Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
+std::size_t num_capabilities,
+KPageTable& page_table) {
Clear();

return ParseCapabilities(capabilities, num_capabilities, page_table);
@@ -107,9 +107,8 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
can_force_debug = true;
}

-ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
-std::size_t num_capabilities,
-KPageTable& page_table) {
+Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
+KPageTable& page_table) {
u32 set_flags = 0;
u32 set_svc_bits = 0;

@@ -155,8 +154,8 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
return ResultSuccess;
}

-ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
-u32 flag, KPageTable& page_table) {
+Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
+KPageTable& page_table) {
const auto type = GetCapabilityType(flag);

if (type == CapabilityType::Unset) {
@@ -224,7 +223,7 @@ void ProcessCapabilities::Clear() {
can_force_debug = false;
}

-ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
+Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask);
@@ -266,7 +265,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
+Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
const u32 index = flags >> 29;
const u32 svc_bit = 1U << index;

@@ -290,23 +289,23 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
-KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
+KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
+Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
constexpr u32 interrupt_ignore_value = 0x3FF;
const u32 interrupt0 = (flags >> 12) & 0x3FF;
const u32 interrupt1 = (flags >> 22) & 0x3FF;
@@ -333,7 +332,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
+Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -344,7 +343,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
+Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
// Yes, the internal member variable is checked in the actual kernel here.
// This might look odd for options that are only allowed to be initialized
// just once, however the kernel has a separate initialization function for
@@ -364,7 +363,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
+Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -375,7 +374,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
return ResultSuccess;
}

-ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
+Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -7,7 +7,7 @@

#include "common/common_types.h"

-union ResultCode;
+union Result;

namespace Kernel {

@@ -86,8 +86,8 @@ public:
/// @returns ResultSuccess if this capabilities instance was able to be initialized,
/// otherwise, an error code upon failure.
///
-ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
-KPageTable& page_table);
+Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
+KPageTable& page_table);

/// Initializes this process capabilities instance for a userland process.
///
@@ -99,8 +99,8 @@ public:
/// @returns ResultSuccess if this capabilities instance was able to be initialized,
/// otherwise, an error code upon failure.
///
-ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
-KPageTable& page_table);
+Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
+KPageTable& page_table);

/// Initializes this process capabilities instance for a process that does not
/// have any metadata to parse.
@@ -185,8 +185,8 @@ private:
///
/// @return ResultSuccess if no errors occur, otherwise an error code.
///
-ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
-KPageTable& page_table);
+Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
+KPageTable& page_table);

/// Attempts to parse a capability descriptor that is only represented by a
/// single flag set.
@@ -200,8 +200,8 @@ private:
///
/// @return ResultSuccess if no errors occurred, otherwise an error code.
///
-ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
-KPageTable& page_table);
+Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
+KPageTable& page_table);

/// Clears the internal state of this process capability instance. Necessary,
/// to have a sane starting point due to us allowing running executables without
@@ -219,34 +219,34 @@ private:
void Clear();

/// Handles flags related to the priority and core number capability flags.
-ResultCode HandlePriorityCoreNumFlags(u32 flags);
+Result HandlePriorityCoreNumFlags(u32 flags);

/// Handles flags related to determining the allowable SVC mask.
-ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
+Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);

/// Handles flags related to mapping physical memory pages.
-ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
+Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);

/// Handles flags related to mapping IO pages.
-ResultCode HandleMapIOFlags(u32 flags, KPageTable& page_table);
+Result HandleMapIOFlags(u32 flags, KPageTable& page_table);

/// Handles flags related to mapping physical memory regions.
-ResultCode HandleMapRegionFlags(u32 flags, KPageTable& page_table);
+Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);

/// Handles flags related to the interrupt capability flags.
-ResultCode HandleInterruptFlags(u32 flags);
+Result HandleInterruptFlags(u32 flags);

/// Handles flags related to the program type.
-ResultCode HandleProgramTypeFlags(u32 flags);
+Result HandleProgramTypeFlags(u32 flags);

/// Handles flags related to the handle table size.
-ResultCode HandleHandleTableFlags(u32 flags);
+Result HandleHandleTableFlags(u32 flags);

/// Handles flags related to the kernel version capability flags.
-ResultCode HandleKernelVersionFlags(u32 flags);
+Result HandleKernelVersionFlags(u32 flags);

/// Handles flags related to debug-specific capabilities.
-ResultCode HandleDebugFlags(u32 flags);
+Result HandleDebugFlags(u32 flags);

SyscallCapabilities svc_capabilities;
InterruptCapabilities interrupt_capabilities;
@@ -58,8 +58,8 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
|
||||
// Helper function that performs the common sanity checks for svcMapMemory
|
||||
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
|
||||
// in the same order.
|
||||
ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
|
||||
u64 size) {
|
||||
Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
|
||||
u64 size) {
|
||||
if (!Common::Is4KBAligned(dst_addr)) {
|
||||
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
|
||||
return ResultInvalidAddress;
|
||||
@@ -135,7 +135,7 @@ enum class ResourceLimitValueType {
|
||||
} // Anonymous namespace
|
||||
|
||||
/// Set the process heap to a given size. It can both extend and shrink the heap.
|
||||
static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
|
||||
static Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
|
||||
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
|
||||
|
||||
// Validate size.
|
||||
@@ -148,9 +148,9 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
|
||||
static Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
|
||||
VAddr temp_heap_addr{};
|
||||
const ResultCode result{SetHeapSize(system, &temp_heap_addr, heap_size)};
|
||||
const Result result{SetHeapSize(system, &temp_heap_addr, heap_size)};
|
||||
*heap_addr = static_cast<u32>(temp_heap_addr);
|
||||
return result;
|
||||
}
|
||||
@@ -166,8 +166,8 @@ constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
|
||||
}
|
||||
}
|
||||
|
||||
static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 size,
|
||||
MemoryPermission perm) {
|
||||
static Result SetMemoryPermission(Core::System& system, VAddr address, u64 size,
|
||||
MemoryPermission perm) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
|
||||
perm);
|
||||
|
||||
@@ -188,8 +188,8 @@ static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 s
|
||||
return page_table.SetMemoryPermission(address, size, perm);
|
||||
}
|
||||
|
||||
static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
|
||||
u32 attr) {
|
||||
static Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
|
||||
u32 attr) {
|
||||
LOG_DEBUG(Kernel_SVC,
|
||||
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
|
||||
size, mask, attr);
|
||||
@@ -213,19 +213,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
|
||||
return page_table.SetMemoryAttribute(address, size, mask, attr);
|
||||
}
|
||||
|
||||
static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
|
||||
u32 attr) {
|
||||
static Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
|
||||
u32 attr) {
|
||||
return SetMemoryAttribute(system, address, size, mask, attr);
|
||||
}
|
||||
|
||||
/// Maps a memory range into a different range.
|
||||
static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
|
||||
static Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
|
||||
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
|
||||
src_addr, size);
|
||||
|
||||
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
|
||||
|
||||
if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
|
||||
if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
|
||||
result.IsError()) {
|
||||
return result;
|
||||
}
|
||||
@@ -233,18 +233,18 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
|
||||
return page_table.MapMemory(dst_addr, src_addr, size);
|
||||
}
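MapMemory above (and UnmapMemory below) funnels its argument validation through MapUnmapMemorySanityChecks and bails out with the C++17 if-with-initializer form. A minimal sketch of that pattern, using a hypothetical Validate() helper in place of the real page-table call:

    // Hypothetical helper standing in for MapUnmapMemorySanityChecks.
    Result Validate(u64 dst, u64 src, u64 size);

    Result DoMap(u64 dst, u64 src, u64 size) {
        // The result is scoped to the if-statement; on failure it is returned as-is.
        if (const Result result{Validate(dst, src, size)}; result.IsError()) {
            return result;
        }
        // ...perform the actual mapping only after validation succeeds...
        return ResultSuccess;
    }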
|
||||
|
||||
static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
|
||||
static Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
|
||||
return MapMemory(system, dst_addr, src_addr, size);
|
||||
}
|
||||
|
||||
/// Unmaps a region that was previously mapped with svcMapMemory
|
||||
static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
|
||||
static Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
|
||||
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
|
||||
src_addr, size);
|
||||
|
||||
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
|
||||
|
||||
if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
|
||||
if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
|
||||
result.IsError()) {
|
||||
return result;
|
||||
}
|
||||
@@ -252,12 +252,12 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
|
||||
return page_table.UnmapMemory(dst_addr, src_addr, size);
|
||||
}
|
||||
|
||||
static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
|
||||
static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
|
||||
return UnmapMemory(system, dst_addr, src_addr, size);
|
||||
}
|
||||
|
||||
/// Connect to an OS service given the port name; returns the handle to the port in out
|
||||
static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
|
||||
static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
|
||||
auto& memory = system.Memory();
|
||||
if (!memory.IsValidVirtualAddress(port_name_address)) {
|
||||
LOG_ERROR(Kernel_SVC,
|
||||
@@ -307,14 +307,14 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr po
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
|
||||
u32 port_name_address) {
|
||||
static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle,
|
||||
u32 port_name_address) {
|
||||
|
||||
return ConnectToNamedPort(system, out_handle, port_name_address);
|
||||
}
|
||||
|
||||
/// Makes a blocking IPC call to an OS service.
|
||||
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
|
||||
static Result SendSyncRequest(Core::System& system, Handle handle) {
|
||||
auto& kernel = system.Kernel();
|
||||
|
||||
// Create the wait queue.
|
||||
@@ -327,7 +327,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
|
||||
|
||||
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
|
||||
|
||||
auto thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
@@ -337,15 +336,15 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
|
||||
session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
|
||||
}
|
||||
|
||||
return thread->GetWaitResult();
|
||||
return GetCurrentThread(kernel).GetWaitResult();
|
||||
}
|
||||
|
||||
static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
|
||||
static Result SendSyncRequest32(Core::System& system, Handle handle) {
|
||||
return SendSyncRequest(system, handle);
|
||||
}
|
||||
|
||||
/// Get the ID for the specified thread.
|
||||
static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
|
||||
static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
|
||||
// Get the thread from its handle.
|
||||
KScopedAutoObject thread =
|
||||
system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
|
||||
@@ -356,10 +355,10 @@ static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle t
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
|
||||
u32* out_thread_id_high, Handle thread_handle) {
|
||||
static Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
|
||||
Handle thread_handle) {
|
||||
u64 out_thread_id{};
|
||||
const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};
|
||||
const Result result{GetThreadId(system, &out_thread_id, thread_handle)};
|
||||
|
||||
*out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
|
||||
*out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
|
||||
@@ -368,7 +367,7 @@ static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
|
||||
}
|
||||
|
||||
/// Gets the ID of the specified process or a specified thread's owning process.
|
||||
static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
|
||||
static Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
|
||||
LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
|
||||
|
||||
// Get the object from the handle table.
|
||||
@@ -399,8 +398,8 @@ static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
|
||||
u32* out_process_id_high, Handle handle) {
|
||||
static Result GetProcessId32(Core::System& system, u32* out_process_id_low,
|
||||
u32* out_process_id_high, Handle handle) {
|
||||
u64 out_process_id{};
|
||||
const auto result = GetProcessId(system, &out_process_id, handle);
|
||||
*out_process_id_low = static_cast<u32>(out_process_id);
|
||||
@@ -409,8 +408,8 @@ static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
|
||||
}
|
||||
|
||||
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
|
||||
static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
|
||||
s32 num_handles, s64 nano_seconds) {
|
||||
static Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
|
||||
s32 num_handles, s64 nano_seconds) {
|
||||
LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
|
||||
handles_address, num_handles, nano_seconds);
|
||||
|
||||
@@ -445,14 +444,14 @@ static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr ha
|
||||
nano_seconds);
|
||||
}
|
||||
|
||||
static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
|
||||
s32 num_handles, u32 timeout_high, s32* index) {
|
||||
static Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
|
||||
s32 num_handles, u32 timeout_high, s32* index) {
|
||||
const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
|
||||
return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
|
||||
}
|
||||
|
||||
/// Resumes a thread waiting on WaitSynchronization
|
||||
static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
|
||||
static Result CancelSynchronization(Core::System& system, Handle handle) {
|
||||
LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
|
||||
|
||||
// Get the thread from its handle.
|
||||
@@ -465,13 +464,12 @@ static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode CancelSynchronization32(Core::System& system, Handle handle) {
|
||||
static Result CancelSynchronization32(Core::System& system, Handle handle) {
|
||||
return CancelSynchronization(system, handle);
|
||||
}
|
||||
|
||||
/// Attempts to lock a mutex
|
||||
static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
|
||||
u32 tag) {
|
||||
static Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
|
||||
LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
|
||||
thread_handle, address, tag);
|
||||
|
||||
@@ -489,13 +487,12 @@ static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAdd
|
||||
return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
|
||||
}
|
||||
|
||||
static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
|
||||
u32 tag) {
|
||||
static Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag) {
|
||||
return ArbitrateLock(system, thread_handle, address, tag);
|
||||
}
|
||||
|
||||
/// Unlock a mutex
|
||||
static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
|
||||
static Result ArbitrateUnlock(Core::System& system, VAddr address) {
|
||||
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
|
||||
|
||||
// Validate the input address.
|
||||
@@ -513,7 +510,7 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
|
||||
return system.Kernel().CurrentProcess()->SignalToAddress(address);
|
||||
}
|
||||
|
||||
static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
|
||||
static Result ArbitrateUnlock32(Core::System& system, u32 address) {
|
||||
return ArbitrateUnlock(system, address);
|
||||
}
|
||||
|
||||
@@ -624,7 +621,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
|
||||
|
||||
handle_debug_buffer(info1, info2);
|
||||
|
||||
auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
|
||||
auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
|
||||
const auto thread_processor_id = current_thread->GetActiveCore();
|
||||
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
|
||||
}
|
||||
@@ -656,8 +653,8 @@ static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
|
||||
}
|
||||
|
||||
/// Gets system/memory information for the current process
|
||||
static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
|
||||
u64 info_sub_id) {
|
||||
static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
|
||||
u64 info_sub_id) {
|
||||
LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
|
||||
info_sub_id, handle);
|
||||
|
||||
@@ -692,6 +689,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
|
||||
// 6.0.0+
|
||||
TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
|
||||
TotalPhysicalMemoryUsedWithoutSystemResource = 22,
|
||||
|
||||
// Homebrew only
|
||||
MesosphereCurrentProcess = 65001,
|
||||
};
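The MesosphereCurrentProcess id mirrors the homebrew extension from the Atmosphère/Mesosphere kernel: info id 65001 with an invalid handle and sub-id 0 yields a handle to the caller's own process. A rough caller-side sketch, assuming a libnx-style svcGetInfo(u64* out, u32 id, Handle handle, u64 sub_id) wrapper (the wrapper name and macros are assumptions, not part of this change):

    u64 out_value = 0;
    // 65001 = MesosphereCurrentProcess; the handle must be the invalid handle and the
    // sub-id must be 0, matching the checks in the GetInfo handler below.
    if (R_SUCCEEDED(svcGetInfo(&out_value, 65001, INVALID_HANDLE, 0))) {
        Handle self_process = static_cast<Handle>(out_value);
        // ...use the handle, then close it when done...
    }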
|
||||
|
||||
const auto info_id_type = static_cast<GetInfoType>(info_id);
|
||||
@@ -884,7 +884,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
|
||||
|
||||
const auto& core_timing = system.CoreTiming();
|
||||
const auto& scheduler = *system.Kernel().CurrentScheduler();
|
||||
const auto* const current_thread = scheduler.GetCurrentThread();
|
||||
const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
|
||||
const bool same_thread = current_thread == thread.GetPointerUnsafe();
|
||||
|
||||
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
|
||||
@@ -914,18 +914,39 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
|
||||
*result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
|
||||
return ResultSuccess;
|
||||
}
|
||||
case GetInfoType::MesosphereCurrentProcess: {
|
||||
// Verify the input handle is invalid.
|
||||
R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
|
||||
|
||||
// Verify the sub-type is valid.
|
||||
R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
|
||||
|
||||
// Get the handle table.
|
||||
KProcess* current_process = system.Kernel().CurrentProcess();
|
||||
KHandleTable& handle_table = current_process->GetHandleTable();
|
||||
|
||||
// Get a new handle for the current process.
|
||||
Handle tmp;
|
||||
R_TRY(handle_table.Add(&tmp, current_process));
|
||||
|
||||
// Set the output.
|
||||
*result = tmp;
|
||||
|
||||
// We succeeded.
|
||||
return ResultSuccess;
|
||||
}
|
||||
default:
|
||||
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
|
||||
return ResultInvalidEnumValue;
|
||||
}
|
||||
}
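The MesosphereCurrentProcess branch above leans on the result macros rather than explicit if/return chains. As a rough mental model only (the real macros in the codebase may carry extra bookkeeping), they expand to early returns along these lines:

    // Approximate expansions, for illustration; not the project's actual definitions.
    #define R_UNLESS(expr, res)  \
        do {                     \
            if (!(expr)) {       \
                return res;      \
            }                    \
        } while (false)

    #define R_TRY(res_expr)                                                  \
        do {                                                                 \
            if (const Result tmp_result{(res_expr)}; tmp_result.IsError()) { \
                return tmp_result;                                           \
            }                                                                \
        } while (false)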
|
||||
|
||||
static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
|
||||
u32 info_id, u32 handle, u32 sub_id_high) {
|
||||
static Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
|
||||
u32 info_id, u32 handle, u32 sub_id_high) {
|
||||
const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
|
||||
u64 res_value{};
|
||||
|
||||
const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)};
|
||||
const Result result{GetInfo(system, &res_value, info_id, handle, sub_id)};
|
||||
*result_high = static_cast<u32>(res_value >> 32);
|
||||
*result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());
|
||||
|
||||
@@ -933,7 +954,7 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
|
||||
}
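GetInfo32 above is representative of how the 32-bit shims marshal 64-bit values: two u32 register halves are packed into a u64 on the way in, and the u64 result is split back into high/low halves on the way out. A self-contained round-trip check of that arithmetic (standalone sketch, not emulator code):

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint32_t sub_id_low = 0xDDCCBBAA;
        const std::uint32_t sub_id_high = 0x11223344;

        // Pack, exactly as GetInfo32 does for sub_id.
        const std::uint64_t sub_id = std::uint64_t{sub_id_low} | (std::uint64_t{sub_id_high} << 32);
        assert(sub_id == 0x11223344DDCCBBAAULL);

        // Split a 64-bit result back into the two output registers.
        const std::uint64_t res_value = sub_id;
        const std::uint32_t result_high = static_cast<std::uint32_t>(res_value >> 32);
        const std::uint32_t result_low = static_cast<std::uint32_t>(res_value & 0xFFFFFFFFu);
        assert(result_high == sub_id_high && result_low == sub_id_low);
        return 0;
    }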
|
||||
|
||||
/// Maps memory at a desired address
|
||||
static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
|
||||
static Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
|
||||
|
||||
if (!Common::Is4KBAligned(addr)) {
|
||||
@@ -981,12 +1002,12 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
|
||||
return page_table.MapPhysicalMemory(addr, size);
|
||||
}
|
||||
|
||||
static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
|
||||
static Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
|
||||
return MapPhysicalMemory(system, addr, size);
|
||||
}
|
||||
|
||||
/// Unmaps memory previously mapped via MapPhysicalMemory
|
||||
static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
|
||||
static Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
|
||||
|
||||
if (!Common::Is4KBAligned(addr)) {
|
||||
@@ -1034,13 +1055,13 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
|
||||
return page_table.UnmapPhysicalMemory(addr, size);
|
||||
}
|
||||
|
||||
static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
|
||||
static Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
|
||||
return UnmapPhysicalMemory(system, addr, size);
|
||||
}
|
||||
|
||||
/// Sets the thread activity
|
||||
static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
|
||||
ThreadActivity thread_activity) {
|
||||
static Result SetThreadActivity(Core::System& system, Handle thread_handle,
|
||||
ThreadActivity thread_activity) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
|
||||
thread_activity);
|
||||
|
||||
@@ -1065,13 +1086,13 @@ static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
|
||||
Svc::ThreadActivity thread_activity) {
|
||||
static Result SetThreadActivity32(Core::System& system, Handle thread_handle,
|
||||
Svc::ThreadActivity thread_activity) {
|
||||
return SetThreadActivity(system, thread_handle, thread_activity);
|
||||
}
|
||||
|
||||
/// Gets the thread context
|
||||
static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
|
||||
static Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
|
||||
thread_handle);
|
||||
|
||||
@@ -1103,7 +1124,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
|
||||
if (thread->GetRawState() != ThreadState::Runnable) {
|
||||
bool current = false;
|
||||
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
|
||||
if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetCurrentThread()) {
|
||||
if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
|
||||
current = true;
|
||||
break;
|
||||
}
|
||||
@@ -1128,12 +1149,12 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
|
||||
static Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
|
||||
return GetThreadContext(system, out_context, thread_handle);
|
||||
}
|
||||
|
||||
/// Gets the priority for the specified thread
|
||||
static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
|
||||
static Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
|
||||
LOG_TRACE(Kernel_SVC, "called");
|
||||
|
||||
// Get the thread from its handle.
|
||||
@@ -1146,12 +1167,12 @@ static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Han
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
|
||||
static Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
|
||||
return GetThreadPriority(system, out_priority, handle);
|
||||
}
|
||||
|
||||
/// Sets the priority for the specified thread
|
||||
static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
|
||||
static Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
|
||||
// Get the current process.
|
||||
KProcess& process = *system.Kernel().CurrentProcess();
|
||||
|
||||
@@ -1169,7 +1190,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle,
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
|
||||
static Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
|
||||
return SetThreadPriority(system, thread_handle, priority);
|
||||
}
|
||||
|
||||
@@ -1229,8 +1250,8 @@ constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission p
|
||||
|
||||
} // Anonymous namespace
|
||||
|
||||
static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
|
||||
u64 size, Svc::MemoryPermission map_perm) {
|
||||
static Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
|
||||
Svc::MemoryPermission map_perm) {
|
||||
LOG_TRACE(Kernel_SVC,
|
||||
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
|
||||
shmem_handle, address, size, map_perm);
|
||||
@@ -1270,13 +1291,13 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAd
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
|
||||
u32 size, Svc::MemoryPermission map_perm) {
|
||||
static Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
|
||||
Svc::MemoryPermission map_perm) {
|
||||
return MapSharedMemory(system, shmem_handle, address, size, map_perm);
|
||||
}
|
||||
|
||||
static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
|
||||
u64 size) {
|
||||
static Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
|
||||
u64 size) {
|
||||
// Validate the address/size.
|
||||
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
|
||||
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
|
||||
@@ -1303,13 +1324,13 @@ static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, V
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
|
||||
u32 size) {
|
||||
static Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
|
||||
u32 size) {
|
||||
return UnmapSharedMemory(system, shmem_handle, address, size);
|
||||
}
|
||||
|
||||
static ResultCode SetProcessMemoryPermission(Core::System& system, Handle process_handle,
|
||||
VAddr address, u64 size, Svc::MemoryPermission perm) {
|
||||
static Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
|
||||
u64 size, Svc::MemoryPermission perm) {
|
||||
LOG_TRACE(Kernel_SVC,
|
||||
"called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
|
||||
process_handle, address, size, perm);
|
||||
@@ -1338,8 +1359,8 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces
|
||||
return page_table.SetProcessMemoryPermission(address, size, perm);
|
||||
}
|
||||
|
||||
static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
|
||||
VAddr src_address, u64 size) {
|
||||
static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
|
||||
VAddr src_address, u64 size) {
|
||||
LOG_TRACE(Kernel_SVC,
|
||||
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
|
||||
dst_address, process_handle, src_address, size);
|
||||
@@ -1368,7 +1389,7 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
|
||||
ResultInvalidMemoryRegion);
|
||||
|
||||
// Create a new page group.
|
||||
KPageLinkedList pg;
|
||||
KPageGroup pg;
|
||||
R_TRY(src_pt.MakeAndOpenPageGroup(
|
||||
std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
|
||||
KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
|
||||
@@ -1381,8 +1402,8 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
|
||||
VAddr src_address, u64 size) {
|
||||
static Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
|
||||
VAddr src_address, u64 size) {
|
||||
LOG_TRACE(Kernel_SVC,
|
||||
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
|
||||
dst_address, process_handle, src_address, size);
|
||||
@@ -1416,7 +1437,7 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
|
||||
static Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
|
||||
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
|
||||
|
||||
// Get kernel instance.
|
||||
@@ -1451,12 +1472,12 @@ static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr addr
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
|
||||
static Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
|
||||
return CreateCodeMemory(system, out, address, size);
|
||||
}
|
||||
|
||||
static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
|
||||
VAddr address, size_t size, Svc::MemoryPermission perm) {
|
||||
static Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
|
||||
VAddr address, size_t size, Svc::MemoryPermission perm) {
|
||||
|
||||
LOG_TRACE(Kernel_SVC,
|
||||
"called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
|
||||
@@ -1534,15 +1555,13 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode ControlCodeMemory32(Core::System& system, Handle code_memory_handle,
|
||||
u32 operation, u64 address, u64 size,
|
||||
Svc::MemoryPermission perm) {
|
||||
static Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
|
||||
u64 address, u64 size, Svc::MemoryPermission perm) {
|
||||
return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
|
||||
}
|
||||
|
||||
static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
|
||||
VAddr page_info_address, Handle process_handle,
|
||||
VAddr address) {
|
||||
static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address,
|
||||
VAddr page_info_address, Handle process_handle, VAddr address) {
|
||||
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
|
||||
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
|
||||
KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
|
||||
@@ -1570,8 +1589,8 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
|
||||
VAddr page_info_address, VAddr query_address) {
|
||||
static Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
|
||||
VAddr query_address) {
|
||||
LOG_TRACE(Kernel_SVC,
|
||||
"called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
|
||||
"query_address=0x{:016X}",
|
||||
@@ -1581,13 +1600,13 @@ static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
|
||||
query_address);
|
||||
}
|
||||
|
||||
static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
|
||||
u32 page_info_address, u32 query_address) {
|
||||
static Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
|
||||
u32 query_address) {
|
||||
return QueryMemory(system, memory_info_address, page_info_address, query_address);
|
||||
}
|
||||
|
||||
static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
|
||||
u64 src_address, u64 size) {
|
||||
static Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
|
||||
u64 src_address, u64 size) {
|
||||
LOG_DEBUG(Kernel_SVC,
|
||||
"called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
|
||||
"src_address=0x{:016X}, size=0x{:016X}",
|
||||
@@ -1654,8 +1673,8 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
|
||||
return page_table.MapCodeMemory(dst_address, src_address, size);
|
||||
}
|
||||
|
||||
static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
|
||||
u64 dst_address, u64 src_address, u64 size) {
|
||||
static Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
|
||||
u64 src_address, u64 size) {
|
||||
LOG_DEBUG(Kernel_SVC,
|
||||
"called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
|
||||
"size=0x{:016X}",
|
||||
@@ -1747,8 +1766,8 @@ constexpr bool IsValidVirtualCoreId(int32_t core_id) {
|
||||
} // Anonymous namespace
|
||||
|
||||
/// Creates a new thread
|
||||
static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
|
||||
VAddr stack_bottom, u32 priority, s32 core_id) {
|
||||
static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
|
||||
VAddr stack_bottom, u32 priority, s32 core_id) {
|
||||
LOG_DEBUG(Kernel_SVC,
|
||||
"called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
|
||||
"priority=0x{:08X}, core_id=0x{:08X}",
|
||||
@@ -1819,13 +1838,13 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
|
||||
u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
|
||||
static Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
|
||||
u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
|
||||
return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
|
||||
}
|
||||
|
||||
/// Starts the thread for the provided handle
|
||||
static ResultCode StartThread(Core::System& system, Handle thread_handle) {
|
||||
static Result StartThread(Core::System& system, Handle thread_handle) {
|
||||
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
|
||||
|
||||
// Get the thread from its handle.
|
||||
@@ -1843,7 +1862,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
|
||||
static Result StartThread32(Core::System& system, Handle thread_handle) {
|
||||
return StartThread(system, thread_handle);
|
||||
}
|
||||
|
||||
@@ -1851,7 +1870,7 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
|
||||
static void ExitThread(Core::System& system) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
|
||||
|
||||
auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
|
||||
auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
|
||||
system.GlobalSchedulerContext().RemoveThread(current_thread);
|
||||
current_thread->Exit();
|
||||
system.Kernel().UnregisterInUseObject(current_thread);
|
||||
@@ -1894,8 +1913,8 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
|
||||
}
|
||||
|
||||
/// Wait process wide key atomic
|
||||
static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
|
||||
u32 tag, s64 timeout_ns) {
|
||||
static Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
|
||||
s64 timeout_ns) {
|
||||
LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
|
||||
cv_key, tag, timeout_ns);
|
||||
|
||||
@@ -1930,8 +1949,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address,
|
||||
address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
|
||||
}
|
||||
|
||||
static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
|
||||
u32 timeout_ns_low, u32 timeout_ns_high) {
|
||||
static Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
|
||||
u32 timeout_ns_low, u32 timeout_ns_high) {
|
||||
const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
|
||||
return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
|
||||
}
|
||||
@@ -1976,8 +1995,8 @@ constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
|
||||
} // namespace
|
||||
|
||||
// Wait for an address (via Address Arbiter)
|
||||
static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
|
||||
s32 value, s64 timeout_ns) {
|
||||
static Result WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
|
||||
s32 value, s64 timeout_ns) {
|
||||
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
|
||||
address, arb_type, value, timeout_ns);
|
||||
|
||||
@@ -2014,15 +2033,15 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::Arbit
|
||||
return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
|
||||
}
|
||||
|
||||
static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
|
||||
s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
|
||||
static Result WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
|
||||
s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
|
||||
const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
|
||||
return WaitForAddress(system, address, arb_type, value, timeout);
|
||||
}
|
||||
|
||||
// Signals to an address (via Address Arbiter)
|
||||
static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
|
||||
s32 value, s32 count) {
|
||||
static Result SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
|
||||
s32 value, s32 count) {
|
||||
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
|
||||
address, signal_type, value, count);
|
||||
|
||||
@@ -2063,8 +2082,8 @@ static void SynchronizePreemptionState(Core::System& system) {
|
||||
}
|
||||
}
|
||||
|
||||
static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
|
||||
s32 value, s32 count) {
|
||||
static Result SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
|
||||
s32 value, s32 count) {
|
||||
return SignalToAddress(system, address, signal_type, value, count);
|
||||
}
|
||||
|
||||
@@ -2102,7 +2121,7 @@ static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high)
|
||||
}
|
||||
|
||||
/// Close a handle
|
||||
static ResultCode CloseHandle(Core::System& system, Handle handle) {
|
||||
static Result CloseHandle(Core::System& system, Handle handle) {
|
||||
LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
|
||||
|
||||
// Remove the handle.
|
||||
@@ -2112,12 +2131,12 @@ static ResultCode CloseHandle(Core::System& system, Handle handle) {
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode CloseHandle32(Core::System& system, Handle handle) {
|
||||
static Result CloseHandle32(Core::System& system, Handle handle) {
|
||||
return CloseHandle(system, handle);
|
||||
}
|
||||
|
||||
/// Clears the signaled state of an event or process.
|
||||
static ResultCode ResetSignal(Core::System& system, Handle handle) {
|
||||
static Result ResetSignal(Core::System& system, Handle handle) {
|
||||
LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
|
||||
|
||||
// Get the current handle table.
|
||||
@@ -2144,7 +2163,7 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) {
|
||||
return ResultInvalidHandle;
|
||||
}
|
||||
|
||||
static ResultCode ResetSignal32(Core::System& system, Handle handle) {
|
||||
static Result ResetSignal32(Core::System& system, Handle handle) {
|
||||
return ResetSignal(system, handle);
|
||||
}
|
||||
|
||||
@@ -2164,8 +2183,8 @@ constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
|
||||
} // Anonymous namespace
|
||||
|
||||
/// Creates a TransferMemory object
|
||||
static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
|
||||
MemoryPermission map_perm) {
|
||||
static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
|
||||
MemoryPermission map_perm) {
|
||||
auto& kernel = system.Kernel();
|
||||
|
||||
// Validate the size.
|
||||
@@ -2211,13 +2230,13 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
|
||||
MemoryPermission map_perm) {
|
||||
static Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
|
||||
MemoryPermission map_perm) {
|
||||
return CreateTransferMemory(system, out, address, size, map_perm);
|
||||
}
|
||||
|
||||
static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
|
||||
u64* out_affinity_mask) {
|
||||
static Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
|
||||
u64* out_affinity_mask) {
|
||||
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
|
||||
|
||||
// Get the thread from its handle.
|
||||
@@ -2231,8 +2250,8 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
|
||||
u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
|
||||
static Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
|
||||
u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
|
||||
u64 out_affinity_mask{};
|
||||
const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
|
||||
*out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
|
||||
@@ -2240,8 +2259,8 @@ static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle
|
||||
return result;
|
||||
}
|
||||
|
||||
static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
|
||||
u64 affinity_mask) {
|
||||
static Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
|
||||
u64 affinity_mask) {
|
||||
// Determine the core id/affinity mask.
|
||||
if (core_id == IdealCoreUseProcessValue) {
|
||||
core_id = system.Kernel().CurrentProcess()->GetIdealCoreId();
|
||||
@@ -2272,13 +2291,13 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
|
||||
u32 affinity_mask_low, u32 affinity_mask_high) {
|
||||
static Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
|
||||
u32 affinity_mask_low, u32 affinity_mask_high) {
|
||||
const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
|
||||
return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
|
||||
}
|
||||
|
||||
static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
|
||||
static Result SignalEvent(Core::System& system, Handle event_handle) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
|
||||
|
||||
// Get the current handle table.
|
||||
@@ -2291,11 +2310,11 @@ static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
|
||||
return writable_event->Signal();
|
||||
}
|
||||
|
||||
static ResultCode SignalEvent32(Core::System& system, Handle event_handle) {
|
||||
static Result SignalEvent32(Core::System& system, Handle event_handle) {
|
||||
return SignalEvent(system, event_handle);
|
||||
}
|
||||
|
||||
static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
|
||||
static Result ClearEvent(Core::System& system, Handle event_handle) {
|
||||
LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
|
||||
|
||||
// Get the current handle table.
|
||||
@@ -2322,11 +2341,11 @@ static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
|
||||
return ResultInvalidHandle;
|
||||
}
|
||||
|
||||
static ResultCode ClearEvent32(Core::System& system, Handle event_handle) {
|
||||
static Result ClearEvent32(Core::System& system, Handle event_handle) {
|
||||
return ClearEvent(system, event_handle);
|
||||
}
|
||||
|
||||
static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
|
||||
static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
|
||||
LOG_DEBUG(Kernel_SVC, "called");
|
||||
|
||||
// Get the kernel reference and handle table.
|
||||
@@ -2371,11 +2390,11 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
|
||||
static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
|
||||
return CreateEvent(system, out_write, out_read);
|
||||
}
|
||||
|
||||
static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
|
||||
static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
|
||||
|
||||
// This function currently only allows retrieving a process' status.
|
||||
@@ -2401,7 +2420,7 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
|
||||
static Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
|
||||
LOG_DEBUG(Kernel_SVC, "called");
|
||||
|
||||
// Create a new resource limit.
|
||||
@@ -2424,9 +2443,8 @@ static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle)
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
|
||||
Handle resource_limit_handle,
|
||||
LimitableResource which) {
|
||||
static Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
|
||||
Handle resource_limit_handle, LimitableResource which) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
|
||||
which);
|
||||
|
||||
@@ -2445,9 +2463,8 @@ static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limi
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
|
||||
Handle resource_limit_handle,
|
||||
LimitableResource which) {
|
||||
static Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
|
||||
Handle resource_limit_handle, LimitableResource which) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
|
||||
which);
|
||||
|
||||
@@ -2466,8 +2483,8 @@ static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_cu
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
|
||||
LimitableResource which, u64 limit_value) {
|
||||
static Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
|
||||
LimitableResource which, u64 limit_value) {
|
||||
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
|
||||
resource_limit_handle, which, limit_value);
|
||||
|
||||
@@ -2486,8 +2503,8 @@ static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resour
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
|
||||
VAddr out_process_ids, u32 out_process_ids_size) {
|
||||
static Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
|
||||
u32 out_process_ids_size) {
|
||||
LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
|
||||
out_process_ids, out_process_ids_size);
|
||||
|
||||
@@ -2523,8 +2540,8 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
|
||||
u32 out_thread_ids_size, Handle debug_handle) {
|
||||
static Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
|
||||
u32 out_thread_ids_size, Handle debug_handle) {
|
||||
// TODO: Handle this case when debug events are supported.
|
||||
UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
|
||||
|
||||
@@ -2563,9 +2580,9 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system,
|
||||
[[maybe_unused]] Handle handle,
|
||||
[[maybe_unused]] u32 address, [[maybe_unused]] u32 size) {
|
||||
static Result FlushProcessDataCache32([[maybe_unused]] Core::System& system,
|
||||
[[maybe_unused]] Handle handle, [[maybe_unused]] u32 address,
|
||||
[[maybe_unused]] u32 size) {
|
||||
// Note(Blinkhawk): For emulation purposes of the data cache this is mostly a no-op,
|
||||
// as all emulation is done in the same cache level in host architecture, thus data cache
|
||||
// does not need flushing.
|
||||
@@ -2993,7 +3010,7 @@ void Call(Core::System& system, u32 immediate) {
|
||||
auto& kernel = system.Kernel();
|
||||
kernel.EnterSVCProfile();
|
||||
|
||||
auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
auto* thread = GetCurrentThreadPointer(kernel);
|
||||
thread->SetIsCallingSvc();
|
||||
|
||||
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
|
||||
|
||||
@@ -9,34 +9,34 @@ namespace Kernel {

// Confirmed Switch kernel error codes

constexpr ResultCode ResultOutOfSessions{ErrorModule::Kernel, 7};
constexpr ResultCode ResultInvalidArgument{ErrorModule::Kernel, 14};
constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
constexpr ResultCode ResultOutOfHandles{ErrorModule::Kernel, 105};
constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
constexpr ResultCode ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
constexpr ResultCode ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
constexpr ResultCode ResultInvalidPointer{ErrorModule::Kernel, 115};
constexpr ResultCode ResultInvalidCombination{ErrorModule::Kernel, 116};
constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
constexpr ResultCode ResultSessionClosed{ErrorModule::Kernel, 123};
constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
constexpr ResultCode ResultReservedUsed{ErrorModule::Kernel, 126};
constexpr ResultCode ResultPortClosed{ErrorModule::Kernel, 131};
constexpr ResultCode ResultLimitReached{ErrorModule::Kernel, 132};
constexpr ResultCode ResultInvalidId{ErrorModule::Kernel, 519};
constexpr Result ResultOutOfSessions{ErrorModule::Kernel, 7};
constexpr Result ResultInvalidArgument{ErrorModule::Kernel, 14};
constexpr Result ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
constexpr Result ResultTerminationRequested{ErrorModule::Kernel, 59};
constexpr Result ResultInvalidSize{ErrorModule::Kernel, 101};
constexpr Result ResultInvalidAddress{ErrorModule::Kernel, 102};
constexpr Result ResultOutOfResource{ErrorModule::Kernel, 103};
constexpr Result ResultOutOfMemory{ErrorModule::Kernel, 104};
constexpr Result ResultOutOfHandles{ErrorModule::Kernel, 105};
constexpr Result ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
constexpr Result ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
constexpr Result ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
constexpr Result ResultInvalidPriority{ErrorModule::Kernel, 112};
constexpr Result ResultInvalidCoreId{ErrorModule::Kernel, 113};
constexpr Result ResultInvalidHandle{ErrorModule::Kernel, 114};
constexpr Result ResultInvalidPointer{ErrorModule::Kernel, 115};
constexpr Result ResultInvalidCombination{ErrorModule::Kernel, 116};
constexpr Result ResultTimedOut{ErrorModule::Kernel, 117};
constexpr Result ResultCancelled{ErrorModule::Kernel, 118};
constexpr Result ResultOutOfRange{ErrorModule::Kernel, 119};
constexpr Result ResultInvalidEnumValue{ErrorModule::Kernel, 120};
constexpr Result ResultNotFound{ErrorModule::Kernel, 121};
constexpr Result ResultBusy{ErrorModule::Kernel, 122};
constexpr Result ResultSessionClosed{ErrorModule::Kernel, 123};
constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};

} // namespace Kernel
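Each of these constants pairs an error module with a numeric description, and call sites compare or propagate them through the Result value itself (IsError()/IsSuccess(), or the raw field when crossing the SVC ABI, as the wrappers below do). A small sketch of typical usage, with a made-up LookupHandle() helper:

    // Hypothetical helper used only for illustration.
    Result LookupHandle(Handle handle);

    Result Example(Handle handle) {
        const Result result = LookupHandle(handle);
        if (result == ResultInvalidHandle) {
            // Specific failure: the handle was not present in the handle table.
            return result;
        }
        if (result.IsError()) {
            // Any other kernel error is forwarded unchanged.
            return result;
        }
        return ResultSuccess;
    }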
@@ -33,24 +33,24 @@ static inline void FuncReturn32(Core::System& system, u32 result) {
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Function wrappers that return type ResultCode
|
||||
// Function wrappers that return type Result
|
||||
|
||||
template <ResultCode func(Core::System&, u64)>
|
||||
template <Result func(Core::System&, u64)>
|
||||
void SvcWrap64(Core::System& system) {
|
||||
FuncReturn(system, func(system, Param(system, 0)).raw);
|
||||
}
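These overloads all share the name SvcWrap64, so selecting a wrapper is just a matter of instantiating the template with the target SVC: the SVC's signature picks the matching overload, which unpacks guest registers via Param() and hands the raw result back through FuncReturn(). A minimal, self-contained sketch of that mechanism with simplified stand-ins (these are not yuzu's real FunctionDef/Param/FuncReturn definitions):

    #include <cstdint>

    struct System {};
    struct Result {
        std::uint32_t raw;
        bool IsError() const { return raw != 0; }
    };

    std::uint64_t Param(System&, int) { return 0; }  // reads a guest register
    void FuncReturn(System&, std::uint32_t) {}       // writes the result register

    template <Result func(System&, std::uint64_t)>
    void SvcWrap64(System& system) {
        FuncReturn(system, func(system, Param(system, 0)).raw);
    }

    Result ExampleSvc(System&, std::uint64_t) { return Result{0}; }

    struct FunctionDef {
        std::uint32_t id;
        void (*func)(System&);
        const char* name;
    };

    // A dispatch-table entry picks the overload purely from ExampleSvc's signature,
    // mirroring how Call() looks entries up via GetSVCInfo64(immediate).
    constexpr FunctionDef kExampleEntry{0x01, SvcWrap64<ExampleSvc>, "ExampleSvc"};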
|
||||
|
||||
template <ResultCode func(Core::System&, u64, u64)>
|
||||
template <Result func(Core::System&, u64, u64)>
|
||||
void SvcWrap64(Core::System& system) {
|
||||
FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw);
|
||||
}
|
||||
|
||||
template <ResultCode func(Core::System&, u32)>
|
||||
template <Result func(Core::System&, u32)>
|
||||
void SvcWrap64(Core::System& system) {
|
||||
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
|
||||
}
|
||||
|
||||
template <ResultCode func(Core::System&, u32, u32)>
|
||||
template <Result func(Core::System&, u32, u32)>
|
||||
void SvcWrap64(Core::System& system) {
|
||||
FuncReturn(
|
||||
system,
|
||||
@@ -58,14 +58,14 @@ void SvcWrap64(Core::System& system) {
|
||||
}
|
||||
|
||||
// Used by SetThreadActivity
template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
                            static_cast<Svc::ThreadActivity>(Param(system, 1)))
                           .raw);
}

template <ResultCode func(Core::System&, u32, u64, u64, u64)>
template <Result func(Core::System&, u32, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
                            Param(system, 2), Param(system, 3))
@@ -73,7 +73,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by MapProcessMemory and UnmapProcessMemory
template <ResultCode func(Core::System&, u64, u32, u64, u64)>
template <Result func(Core::System&, u64, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
                            Param(system, 2), Param(system, 3))
@@ -81,7 +81,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by ControlCodeMemory
template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
                            static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
@@ -89,7 +89,7 @@ void SvcWrap64(Core::System& system) {
                           .raw);
}

template <ResultCode func(Core::System&, u32*)>
template <Result func(Core::System&, u32*)>
void SvcWrap64(Core::System& system) {
    u32 param = 0;
    const u32 retval = func(system, &param).raw;
@@ -97,7 +97,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u32)>
template <Result func(Core::System&, u32*, u32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
@@ -105,7 +105,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u32*)>
template <Result func(Core::System&, u32*, u32*)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;
@@ -118,7 +118,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u64)>
template <Result func(Core::System&, u32*, u64)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1)).raw;
@@ -126,7 +126,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u64, u32)>
template <Result func(Core::System&, u32*, u64, u32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval =
@@ -136,7 +136,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64*, u32)>
template <Result func(Core::System&, u64*, u32)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
@@ -145,12 +145,12 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64, u32)>
template <Result func(Core::System&, u64, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))).raw);
}

template <ResultCode func(Core::System&, u64*, u64)>
template <Result func(Core::System&, u64*, u64)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1)).raw;
@@ -159,7 +159,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64*, u32, u32)>
template <Result func(Core::System&, u64*, u32, u32)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1)),
@@ -171,7 +171,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by GetResourceLimitLimitValue.
template <ResultCode func(Core::System&, u64*, Handle, LimitableResource)>
template <Result func(Core::System&, u64*, Handle, LimitableResource)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
@@ -182,13 +182,13 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32, u64)>
template <Result func(Core::System&, u32, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
}

// Used by SetResourceLimitLimitValue
template <ResultCode func(Core::System&, Handle, LimitableResource, u64)>
template <Result func(Core::System&, Handle, LimitableResource, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
                            static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
@@ -196,7 +196,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by SetThreadCoreMask
template <ResultCode func(Core::System&, Handle, s32, u64)>
template <Result func(Core::System&, Handle, s32, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
                            static_cast<s32>(Param(system, 1)), Param(system, 2))
@@ -204,44 +204,44 @@ void SvcWrap64(Core::System& system) {
}

// Used by GetThreadCoreMask
template <ResultCode func(Core::System&, Handle, s32*, u64*)>
template <Result func(Core::System&, Handle, s32*, u64*)>
void SvcWrap64(Core::System& system) {
    s32 param_1 = 0;
    u64 param_2 = 0;
    const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
    const Result retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);

    system.CurrentArmInterface().SetReg(1, param_1);
    system.CurrentArmInterface().SetReg(2, param_2);
    FuncReturn(system, retval.raw);
}

template <ResultCode func(Core::System&, u64, u64, u32, u32)>
template <Result func(Core::System&, u64, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
                            static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
                           .raw);
}

template <ResultCode func(Core::System&, u64, u64, u32, u64)>
template <Result func(Core::System&, u64, u64, u32, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
                            static_cast<u32>(Param(system, 2)), Param(system, 3))
                           .raw);
}

template <ResultCode func(Core::System&, u32, u64, u32)>
template <Result func(Core::System&, u32, u64, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
                            static_cast<u32>(Param(system, 2)))
                           .raw);
}

template <ResultCode func(Core::System&, u64, u64, u64)>
template <Result func(Core::System&, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1), Param(system, 2)).raw);
}

template <ResultCode func(Core::System&, u64, u64, u32)>
template <Result func(Core::System&, u64, u64, u32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(
        system,
@@ -249,7 +249,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by SetMemoryPermission
template <ResultCode func(Core::System&, u64, u64, Svc::MemoryPermission)>
template <Result func(Core::System&, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
                            static_cast<Svc::MemoryPermission>(Param(system, 2)))
@@ -257,14 +257,14 @@ void SvcWrap64(Core::System& system) {
}

// Used by MapSharedMemory
template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
template <Result func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1),
                            Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3)))
                           .raw);
}

template <ResultCode func(Core::System&, u32, u64, u64)>
template <Result func(Core::System&, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(
        system,
@@ -272,7 +272,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by WaitSynchronization
template <ResultCode func(Core::System&, s32*, u64, s32, s64)>
template <Result func(Core::System&, s32*, u64, s32, s64)>
void SvcWrap64(Core::System& system) {
    s32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), static_cast<s32>(Param(system, 2)),
@@ -283,7 +283,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64, u64, u32, s64)>
template <Result func(Core::System&, u64, u64, u32, s64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
                            static_cast<u32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
@@ -291,7 +291,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by GetInfo
template <ResultCode func(Core::System&, u64*, u64, Handle, u64)>
template <Result func(Core::System&, u64*, u64, Handle, u64)>
void SvcWrap64(Core::System& system) {
    u64 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1),
@@ -302,7 +302,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u32*, u64, u64, u64, u32, s32)>
template <Result func(Core::System&, u32*, u64, u64, u64, u32, s32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3),
@@ -314,7 +314,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by CreateTransferMemory
template <ResultCode func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
template <Result func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
@@ -326,7 +326,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by CreateCodeMemory
template <ResultCode func(Core::System&, Handle*, u64, u64)>
template <Result func(Core::System&, Handle*, u64, u64)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;
@@ -335,7 +335,7 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, Handle*, u64, u32, u32)>
template <Result func(Core::System&, Handle*, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
@@ -347,7 +347,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by WaitForAddress
template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system,
               func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
@@ -356,7 +356,7 @@ void SvcWrap64(Core::System& system) {
}

// Used by SignalToAddress
template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
template <Result func(Core::System&, u64, Svc::SignalType, s32, s32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system,
               func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
@@ -425,7 +425,7 @@ void SvcWrap64(Core::System& system) {
}
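One detail worth calling out before the 32-bit wrappers: when a 64-bit output has to reach a 32-bit caller (for example the affinity mask in GetThreadCoreMask32, whose wrapper below takes two u32* outputs), it is returned as a low/high register pair. A toy illustration of that split (the register array and names are illustrative, not yuzu's ARM interface):

#include <cstdint>

// Sketch: splitting a 64-bit SVC output across two 32-bit guest registers.
inline void ReturnU64AsPair(std::uint32_t regs[4], std::uint64_t value) {
    regs[1] = static_cast<std::uint32_t>(value);       // low half
    regs[2] = static_cast<std::uint32_t>(value >> 32); // high half
}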
// Used by QueryMemory32, ArbitrateLock32
template <ResultCode func(Core::System&, u32, u32, u32)>
template <Result func(Core::System&, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
    FuncReturn32(system,
                 func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
@@ -456,7 +456,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by CreateThread32
template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
template <Result func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
void SvcWrap32(Core::System& system) {
    Handle param_1 = 0;

@@ -469,7 +469,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by GetInfo32
template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
template <Result func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;
@@ -484,7 +484,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by GetThreadPriority32, ConnectToNamedPort32
template <ResultCode func(Core::System&, u32*, u32)>
template <Result func(Core::System&, u32*, u32)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param32(system, 1)).raw;
@@ -493,7 +493,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by GetThreadId32
template <ResultCode func(Core::System&, u32*, u32*, u32)>
template <Result func(Core::System&, u32*, u32*, u32)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;
@@ -516,7 +516,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by CreateEvent32
template <ResultCode func(Core::System&, Handle*, Handle*)>
template <Result func(Core::System&, Handle*, Handle*)>
void SvcWrap32(Core::System& system) {
    Handle param_1 = 0;
    Handle param_2 = 0;
@@ -528,7 +528,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by GetThreadId32
template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)>
template <Result func(Core::System&, Handle, u32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
    u32 param_1 = 0;
    u32 param_2 = 0;
@@ -542,7 +542,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by GetThreadCoreMask32
template <ResultCode func(Core::System&, Handle, s32*, u32*, u32*)>
template <Result func(Core::System&, Handle, s32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
    s32 param_1 = 0;
    u32 param_2 = 0;
@@ -562,7 +562,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by SetThreadActivity32
template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
void SvcWrap32(Core::System& system) {
    const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
                            static_cast<Svc::ThreadActivity>(Param(system, 1)))
@@ -571,7 +571,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by SetThreadPriority32
template <ResultCode func(Core::System&, Handle, u32)>
template <Result func(Core::System&, Handle, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
@@ -579,7 +579,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by SetMemoryAttribute32
template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
template <Result func(Core::System&, Handle, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
@@ -589,7 +589,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by MapSharedMemory32
template <ResultCode func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
template <Result func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
    const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
                            static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)),
@@ -599,7 +599,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by SetThreadCoreMask32
template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
template <Result func(Core::System&, Handle, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
@@ -609,7 +609,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by WaitProcessWideKeyAtomic32
template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
template <Result func(Core::System&, u32, u32, Handle, u32, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
@@ -620,7 +620,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by WaitForAddress32
template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
template <Result func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
                            static_cast<Svc::ArbitrationType>(Param(system, 1)),
@@ -631,7 +631,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by SignalToAddress32
template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
template <Result func(Core::System&, u32, Svc::SignalType, s32, s32)>
void SvcWrap32(Core::System& system) {
    const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
                            static_cast<Svc::SignalType>(Param(system, 1)),
@@ -641,13 +641,13 @@ void SvcWrap32(Core::System& system) {
}

// Used by SendSyncRequest32, ArbitrateUnlock32
template <ResultCode func(Core::System&, u32)>
template <Result func(Core::System&, u32)>
void SvcWrap32(Core::System& system) {
    FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}

// Used by CreateTransferMemory32
template <ResultCode func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
template <Result func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
    Handle handle = 0;
    const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2),
@@ -658,7 +658,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by WaitSynchronization32
template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
template <Result func(Core::System&, u32, u32, s32, u32, s32*)>
void SvcWrap32(Core::System& system) {
    s32 param_1 = 0;
    const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
@@ -669,7 +669,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by CreateCodeMemory32
template <ResultCode func(Core::System&, Handle*, u32, u32)>
template <Result func(Core::System&, Handle*, u32, u32)>
void SvcWrap32(Core::System& system) {
    Handle handle = 0;

@@ -680,7 +680,7 @@ void SvcWrap32(Core::System& system) {
}

// Used by ControlCodeMemory32
template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4),
@@ -112,15 +112,15 @@ enum class ErrorModule : u32 {
};

/// Encapsulates a Horizon OS error code, allowing it to be separated into its constituent fields.
union ResultCode {
union Result {
    u32 raw;

    BitField<0, 9, ErrorModule> module;
    BitField<9, 13, u32> description;

    constexpr explicit ResultCode(u32 raw_) : raw(raw_) {}
    constexpr explicit Result(u32 raw_) : raw(raw_) {}

    constexpr ResultCode(ErrorModule module_, u32 description_)
    constexpr Result(ErrorModule module_, u32 description_)
        : raw(module.FormatValue(module_) | description.FormatValue(description_)) {}

    [[nodiscard]] constexpr bool IsSuccess() const {
@@ -132,18 +132,18 @@ union ResultCode {
    }
};

[[nodiscard]] constexpr bool operator==(const ResultCode& a, const ResultCode& b) {
[[nodiscard]] constexpr bool operator==(const Result& a, const Result& b) {
    return a.raw == b.raw;
}

[[nodiscard]] constexpr bool operator!=(const ResultCode& a, const ResultCode& b) {
[[nodiscard]] constexpr bool operator!=(const Result& a, const Result& b) {
    return !operator==(a, b);
}
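The rename does not touch the layout: per the BitField members above, bits 0-8 carry the module and bits 9-21 carry the description, so `raw == module | (description << 9)`. A small sketch of that arithmetic against the constructor shown above (the numeric value 1 for ErrorModule::Kernel is an assumption used only for illustration):

// Sketch only: equivalent arithmetic to the BitField layout above.
constexpr u32 MakeRawExample(u32 module, u32 description) {
    return (module & 0x1FF) | ((description & 0x1FFF) << 9);
}

constexpr Result timed_out_example{ErrorModule::Kernel, 117};
static_assert(timed_out_example.raw == MakeRawExample(1, 117)); // assumes Kernel == 1
static_assert(timed_out_example == Result{ErrorModule::Kernel, 117});
static_assert(!timed_out_example.IsSuccess());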
// Convenience functions for creating some common kinds of errors:

/// The default success `ResultCode`.
constexpr ResultCode ResultSuccess(0);
/// The default success `Result`.
constexpr Result ResultSuccess(0);

/**
 * Placeholder result code used for unknown error codes.
@@ -151,24 +151,24 @@ constexpr ResultCode ResultSuccess(0);
 * @note This should only be used when a particular error code
 * is not known yet.
 */
constexpr ResultCode ResultUnknown(UINT32_MAX);
constexpr Result ResultUnknown(UINT32_MAX);

/**
 * A ResultRange defines an inclusive range of error descriptions within an error module.
 * This can be used to check whether the description of a given ResultCode falls within the range.
 * The conversion function returns a ResultCode with its description set to description_start.
 * This can be used to check whether the description of a given Result falls within the range.
 * The conversion function returns a Result with its description set to description_start.
 *
 * An example of how it could be used:
 * \code
 * constexpr ResultRange ResultCommonError{ErrorModule::Common, 0, 9999};
 *
 * ResultCode Example(int value) {
 *     const ResultCode result = OtherExample(value);
 * Result Example(int value) {
 *     const Result result = OtherExample(value);
 *
 *     // This will only evaluate to true if result.module is ErrorModule::Common and
 *     // result.description is in between 0 and 9999 inclusive.
 *     if (ResultCommonError.Includes(result)) {
 *         // This returns ResultCode{ErrorModule::Common, 0};
 *         // This returns Result{ErrorModule::Common, 0};
 *         return ResultCommonError;
 *     }
 *
@@ -181,22 +181,22 @@ public:
    consteval ResultRange(ErrorModule module, u32 description_start, u32 description_end_)
        : code{module, description_start}, description_end{description_end_} {}

    [[nodiscard]] constexpr operator ResultCode() const {
    [[nodiscard]] constexpr operator Result() const {
        return code;
    }

    [[nodiscard]] constexpr bool Includes(ResultCode other) const {
    [[nodiscard]] constexpr bool Includes(Result other) const {
        return code.module == other.module && code.description <= other.description &&
               other.description <= description_end;
    }

private:
    ResultCode code;
    Result code;
    u32 description_end;
};

/**
 * This is an optional value type. It holds a `ResultCode` and, if that code is ResultSuccess, it
 * This is an optional value type. It holds a `Result` and, if that code is ResultSuccess, it
 * also holds a result of type `T`. If the code is an error code (not ResultSuccess), then trying
 * to access the inner value with operator* is undefined behavior and will assert with Unwrap().
 * Users of this class must be cognizant to check the status of the ResultVal with operator bool(),
@@ -207,7 +207,7 @@ private:
 * ResultVal<int> Frobnicate(float strength) {
 *     if (strength < 0.f || strength > 1.0f) {
 *         // Can't frobnicate too weakly or too strongly
 *         return ResultCode{ErrorModule::Common, 1};
 *         return Result{ErrorModule::Common, 1};
 *     } else {
 *         // Frobnicated! Give caller a cookie
 *         return 42;
@@ -230,7 +230,7 @@ class ResultVal {
public:
    constexpr ResultVal() : expected{} {}

    constexpr ResultVal(ResultCode code) : expected{Common::Unexpected(code)} {}
    constexpr ResultVal(Result code) : expected{Common::Unexpected(code)} {}

    constexpr ResultVal(ResultRange range) : expected{Common::Unexpected(range)} {}

@@ -252,7 +252,7 @@ public:
        return expected.has_value();
    }

    [[nodiscard]] constexpr ResultCode Code() const {
    [[nodiscard]] constexpr Result Code() const {
        return expected.has_value() ? ResultSuccess : expected.error();
    }

@@ -320,7 +320,7 @@ public:

private:
    // TODO (Morph): Replace this with C++23 std::expected.
    Common::Expected<T, ResultCode> expected;
    Common::Expected<T, Result> expected;
};

/**
@@ -337,7 +337,7 @@ private:
    target = std::move(*CONCAT2(check_result_L, __LINE__))

/**
 * Analogous to CASCADE_RESULT, but for a bare ResultCode. The code will be propagated if
 * Analogous to CASCADE_RESULT, but for a bare Result. The code will be propagated if
 * non-success, or discarded otherwise.
 */
#define CASCADE_CODE(source) \
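For completeness, the consumption side of the Frobnicate example in the comment above, using only members this file documents (`operator bool`, `Code()`, `operator*`) plus the CASCADE_CODE macro; `UseCookie` is a hypothetical callee, not code from this change:

// Sketch: consuming a ResultVal<int> and propagating failures to the caller.
Result DoFrobnicate(float strength) {
    const ResultVal<int> cookie = Frobnicate(strength);
    if (!cookie) {
        return cookie.Code(); // forward the error Result unchanged
    }
    UseCookie(*cookie); // hypothetical consumer; only dereferenced on success
    return ResultSuccess;
}

// For a call that yields a bare Result rather than a ResultVal, the macro above
// expresses the same early-return:
//     CASCADE_CODE(SomeBareResultCall());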
@@ -28,11 +28,11 @@

namespace Service::Account {

constexpr ResultCode ERR_INVALID_USER_ID{ErrorModule::Account, 20};
constexpr ResultCode ERR_INVALID_APPLICATION_ID{ErrorModule::Account, 22};
constexpr ResultCode ERR_INVALID_BUFFER{ErrorModule::Account, 30};
constexpr ResultCode ERR_INVALID_BUFFER_SIZE{ErrorModule::Account, 31};
constexpr ResultCode ERR_FAILED_SAVE_DATA{ErrorModule::Account, 100};
constexpr Result ERR_INVALID_USER_ID{ErrorModule::Account, 20};
constexpr Result ERR_INVALID_APPLICATION_ID{ErrorModule::Account, 22};
constexpr Result ERR_INVALID_BUFFER{ErrorModule::Account, 30};
constexpr Result ERR_INVALID_BUFFER_SIZE{ErrorModule::Account, 31};
constexpr Result ERR_FAILED_SAVE_DATA{ErrorModule::Account, 100};

// Thumbnails are hard coded to be at least this size
constexpr std::size_t THUMBNAIL_SIZE = 0x24000;
@@ -505,7 +505,7 @@ protected:

    void Cancel() override {}

    ResultCode GetResult() const override {
    Result GetResult() const override {
        return ResultSuccess;
    }
};
@@ -747,7 +747,7 @@ void Module::Interface::InitializeApplicationInfoRestricted(Kernel::HLERequestCo
    rb.Push(InitializeApplicationInfoBase());
}

ResultCode Module::Interface::InitializeApplicationInfoBase() {
Result Module::Interface::InitializeApplicationInfoBase() {
    if (application_info) {
        LOG_ERROR(Service_ACC, "Application already initialized");
        return ERR_ACCOUNTINFO_ALREADY_INITIALIZED;

@@ -41,7 +41,7 @@ public:
    void StoreSaveDataThumbnailSystem(Kernel::HLERequestContext& ctx);

private:
    ResultCode InitializeApplicationInfoBase();
    Result InitializeApplicationInfoBase();
    void StoreSaveDataThumbnail(Kernel::HLERequestContext& ctx, const Common::UUID& uuid,
                                const u64 tid);

@@ -26,7 +26,7 @@ public:
protected:
    virtual bool IsComplete() const = 0;
    virtual void Cancel() = 0;
    virtual ResultCode GetResult() const = 0;
    virtual Result GetResult() const = 0;

    void MarkComplete();
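The three pure-virtual members above (IsComplete, Cancel, GetResult) are the contract each applet backend below fills in. A minimal hypothetical implementation, mirroring the stubbed GetResult earlier in this hunk set; `ExampleApplet` and its base-class name are stand-ins:

// Hypothetical minimal backend, shaped like the overrides in this diff.
class ExampleApplet final : public AppletBackend { // AppletBackend is a stand-in name
protected:
    bool IsComplete() const override {
        return true; // nothing asynchronous to wait for
    }
    void Cancel() override {}
    Result GetResult() const override {
        return ResultSuccess; // always report success
    }
};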
@@ -7,7 +7,7 @@

namespace Service::Account {

constexpr ResultCode ERR_ACCOUNTINFO_BAD_APPLICATION{ErrorModule::Account, 22};
constexpr ResultCode ERR_ACCOUNTINFO_ALREADY_INITIALIZED{ErrorModule::Account, 41};
constexpr Result ERR_ACCOUNTINFO_BAD_APPLICATION{ErrorModule::Account, 22};
constexpr Result ERR_ACCOUNTINFO_ALREADY_INITIALIZED{ErrorModule::Account, 41};

} // namespace Service::Account

@@ -33,9 +33,9 @@ struct ProfileDataRaw {
static_assert(sizeof(ProfileDataRaw) == 0x650, "ProfileDataRaw has incorrect size.");

// TODO(ogniK): Get actual error codes
constexpr ResultCode ERROR_TOO_MANY_USERS(ErrorModule::Account, u32(-1));
constexpr ResultCode ERROR_USER_ALREADY_EXISTS(ErrorModule::Account, u32(-2));
constexpr ResultCode ERROR_ARGUMENT_IS_NULL(ErrorModule::Account, 20);
constexpr Result ERROR_TOO_MANY_USERS(ErrorModule::Account, u32(-1));
constexpr Result ERROR_USER_ALREADY_EXISTS(ErrorModule::Account, u32(-2));
constexpr Result ERROR_ARGUMENT_IS_NULL(ErrorModule::Account, 20);

constexpr char ACC_SAVE_AVATORS_BASE_PATH[] = "system/save/8000000000000010/su/avators";

@@ -87,7 +87,7 @@ bool ProfileManager::RemoveProfileAtIndex(std::size_t index) {
}

/// Helper function to register a user to the system
ResultCode ProfileManager::AddUser(const ProfileInfo& user) {
Result ProfileManager::AddUser(const ProfileInfo& user) {
    if (!AddToProfiles(user)) {
        return ERROR_TOO_MANY_USERS;
    }
@@ -96,7 +96,7 @@ ResultCode ProfileManager::AddUser(const ProfileInfo& user) {

/// Create a new user on the system. If the uuid of the user already exists, the user is not
/// created.
ResultCode ProfileManager::CreateNewUser(UUID uuid, const ProfileUsername& username) {
Result ProfileManager::CreateNewUser(UUID uuid, const ProfileUsername& username) {
    if (user_count == MAX_USERS) {
        return ERROR_TOO_MANY_USERS;
    }
@@ -123,7 +123,7 @@ ResultCode ProfileManager::CreateNewUser(UUID uuid, const ProfileUsername& usern
/// Creates a new user on the system. This function allows a much simpler method of registration
/// specifically by allowing an std::string for the username. This is required specifically since
/// we're loading a string straight from the config
ResultCode ProfileManager::CreateNewUser(UUID uuid, const std::string& username) {
Result ProfileManager::CreateNewUser(UUID uuid, const std::string& username) {
    ProfileUsername username_output{};

    if (username.size() > username_output.size()) {

@@ -64,9 +64,9 @@ public:
    ProfileManager();
    ~ProfileManager();

    ResultCode AddUser(const ProfileInfo& user);
    ResultCode CreateNewUser(Common::UUID uuid, const ProfileUsername& username);
    ResultCode CreateNewUser(Common::UUID uuid, const std::string& username);
    Result AddUser(const ProfileInfo& user);
    Result CreateNewUser(Common::UUID uuid, const ProfileUsername& username);
    Result CreateNewUser(Common::UUID uuid, const std::string& username);
    std::optional<Common::UUID> GetUser(std::size_t index) const;
    std::optional<std::size_t> GetUserIndex(const Common::UUID& uuid) const;
    std::optional<std::size_t> GetUserIndex(const ProfileInfo& user) const;

@@ -40,9 +40,9 @@

namespace Service::AM {

constexpr ResultCode ERR_NO_DATA_IN_CHANNEL{ErrorModule::AM, 2};
constexpr ResultCode ERR_NO_MESSAGES{ErrorModule::AM, 3};
constexpr ResultCode ERR_SIZE_OUT_OF_BOUNDS{ErrorModule::AM, 503};
constexpr Result ERR_NO_DATA_IN_CHANNEL{ErrorModule::AM, 2};
constexpr Result ERR_NO_MESSAGES{ErrorModule::AM, 3};
constexpr Result ERR_SIZE_OUT_OF_BOUNDS{ErrorModule::AM, 503};

enum class LaunchParameterKind : u32 {
    ApplicationSpecific = 1,
@@ -365,7 +365,7 @@ void ISelfController::LeaveFatalSection(Kernel::HLERequestContext& ctx) {
    // Entry and exit of fatal sections must be balanced.
    if (num_fatal_sections_entered == 0) {
        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(ResultCode{ErrorModule::AM, 512});
        rb.Push(Result{ErrorModule::AM, 512});
        return;
    }

@@ -20,9 +20,9 @@
namespace Service::AM::Applets {

// This error code (0x183ACA) is thrown when the applet fails to initialize.
[[maybe_unused]] constexpr ResultCode ERR_CONTROLLER_APPLET_3101{ErrorModule::HID, 3101};
[[maybe_unused]] constexpr Result ERR_CONTROLLER_APPLET_3101{ErrorModule::HID, 3101};
// This error code (0x183CCA) is thrown when the u32 result in ControllerSupportResultInfo is 2.
[[maybe_unused]] constexpr ResultCode ERR_CONTROLLER_APPLET_3102{ErrorModule::HID, 3102};
[[maybe_unused]] constexpr Result ERR_CONTROLLER_APPLET_3102{ErrorModule::HID, 3102};

static Core::Frontend::ControllerParameters ConvertToFrontendParameters(
    ControllerSupportArgPrivate private_arg, ControllerSupportArgHeader header, bool enable_text,
@@ -173,7 +173,7 @@ bool Controller::TransactionComplete() const {
    return complete;
}

ResultCode Controller::GetStatus() const {
Result Controller::GetStatus() const {
    return status;
}

@@ -126,7 +126,7 @@ public:
    void Initialize() override;

    bool TransactionComplete() const override;
    ResultCode GetStatus() const override;
    Result GetStatus() const override;
    void ExecuteInteractive() override;
    void Execute() override;

@@ -143,7 +143,7 @@ private:
    ControllerUpdateFirmwareArg controller_update_arg;
    ControllerKeyRemappingArg controller_key_remapping_arg;
    bool complete{false};
    ResultCode status{ResultSuccess};
    Result status{ResultSuccess};
    bool is_single_mode{false};
    std::vector<u8> out_data;
};

@@ -25,15 +25,15 @@ struct ErrorCode {
        };
    }

    static constexpr ErrorCode FromResultCode(ResultCode result) {
    static constexpr ErrorCode FromResult(Result result) {
        return {
            .error_category{2000 + static_cast<u32>(result.module.Value())},
            .error_number{result.description.Value()},
        };
    }

    constexpr ResultCode ToResultCode() const {
        return ResultCode{static_cast<ErrorModule>(error_category - 2000), error_number};
    constexpr Result ToResult() const {
        return Result{static_cast<ErrorModule>(error_category - 2000), error_number};
    }
};
static_assert(sizeof(ErrorCode) == 0x8, "ErrorCode has incorrect size.");
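FromResult/ToResult above encode a `Result` as the user-facing error code pair: category is 2000 plus the module value, number is the description. A short sketch of the round trip (the numeric value 2 for ErrorModule::FS is an assumption used only for illustration):

// Sketch: round-tripping a Result through the applet ErrorCode encoding.
constexpr Result original{ErrorModule::FS, 1002};
constexpr ErrorCode as_error_code = ErrorCode::FromResult(original);
static_assert(as_error_code.error_category == 2002); // 2000 + module (FS assumed == 2)
static_assert(as_error_code.error_number == 1002);   // description carried through
static_assert(as_error_code.ToResult() == original);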
@@ -97,8 +97,8 @@ void CopyArgumentData(const std::vector<u8>& data, T& variable) {
    std::memcpy(&variable, data.data(), sizeof(T));
}

ResultCode Decode64BitError(u64 error) {
    return ErrorCode::FromU64(error).ToResultCode();
Result Decode64BitError(u64 error) {
    return ErrorCode::FromU64(error).ToResult();
}

} // Anonymous namespace
@@ -127,16 +127,16 @@ void Error::Initialize() {
        if (args->error.use_64bit_error_code) {
            error_code = Decode64BitError(args->error.error_code_64);
        } else {
            error_code = ResultCode(args->error.error_code_32);
            error_code = Result(args->error.error_code_32);
        }
        break;
    case ErrorAppletMode::ShowSystemError:
        CopyArgumentData(data, args->system_error);
        error_code = ResultCode(Decode64BitError(args->system_error.error_code_64));
        error_code = Result(Decode64BitError(args->system_error.error_code_64));
        break;
    case ErrorAppletMode::ShowApplicationError:
        CopyArgumentData(data, args->application_error);
        error_code = ResultCode(args->application_error.error_code);
        error_code = Result(args->application_error.error_code);
        break;
    case ErrorAppletMode::ShowErrorRecord:
        CopyArgumentData(data, args->error_record);
@@ -151,7 +151,7 @@ bool Error::TransactionComplete() const {
    return complete;
}

ResultCode Error::GetStatus() const {
Result Error::GetStatus() const {
    return ResultSuccess;
}

@@ -31,7 +31,7 @@ public:
    void Initialize() override;

    bool TransactionComplete() const override;
    ResultCode GetStatus() const override;
    Result GetStatus() const override;
    void ExecuteInteractive() override;
    void Execute() override;

@@ -41,7 +41,7 @@ private:
    union ErrorArguments;

    const Core::Frontend::ErrorApplet& frontend;
    ResultCode error_code = ResultSuccess;
    Result error_code = ResultSuccess;
    ErrorAppletMode mode = ErrorAppletMode::ShowError;
    std::unique_ptr<ErrorArguments> args;

@@ -13,7 +13,7 @@

namespace Service::AM::Applets {

constexpr ResultCode ERROR_INVALID_PIN{ErrorModule::PCTL, 221};
constexpr Result ERROR_INVALID_PIN{ErrorModule::PCTL, 221};

static void LogCurrentStorage(AppletDataBroker& broker, std::string_view prefix) {
    std::shared_ptr<IStorage> storage = broker.PopNormalDataToApplet();
@@ -71,7 +71,7 @@ bool Auth::TransactionComplete() const {
    return complete;
}

ResultCode Auth::GetStatus() const {
Result Auth::GetStatus() const {
    return successful ? ResultSuccess : ERROR_INVALID_PIN;
}

@@ -136,7 +136,7 @@ void Auth::AuthFinished(bool is_successful) {
    successful = is_successful;

    struct Return {
        ResultCode result_code;
        Result result_code;
    };
    static_assert(sizeof(Return) == 0x4, "Return (AuthApplet) has incorrect size.");

@@ -170,7 +170,7 @@ bool PhotoViewer::TransactionComplete() const {
    return complete;
}

ResultCode PhotoViewer::GetStatus() const {
Result PhotoViewer::GetStatus() const {
    return ResultSuccess;
}

@@ -223,7 +223,7 @@ bool StubApplet::TransactionComplete() const {
    return true;
}

ResultCode StubApplet::GetStatus() const {
Result StubApplet::GetStatus() const {
    LOG_WARNING(Service_AM, "called (STUBBED)");
    return ResultSuccess;
}

@@ -25,7 +25,7 @@ public:

    void Initialize() override;
    bool TransactionComplete() const override;
    ResultCode GetStatus() const override;
    Result GetStatus() const override;
    void ExecuteInteractive() override;
    void Execute() override;

@@ -56,7 +56,7 @@ public:

    void Initialize() override;
    bool TransactionComplete() const override;
    ResultCode GetStatus() const override;
    Result GetStatus() const override;
    void ExecuteInteractive() override;
    void Execute() override;

@@ -77,7 +77,7 @@ public:
    void Initialize() override;

    bool TransactionComplete() const override;
    ResultCode GetStatus() const override;
    Result GetStatus() const override;
    void ExecuteInteractive() override;
    void Execute() override;

@@ -62,7 +62,7 @@ bool MiiEdit::TransactionComplete() const {
    return is_complete;
}

ResultCode MiiEdit::GetStatus() const {
Result MiiEdit::GetStatus() const {
    return ResultSuccess;
}

@@ -22,7 +22,7 @@ public:
    void Initialize() override;

    bool TransactionComplete() const override;
    ResultCode GetStatus() const override;
    Result GetStatus() const override;
    void ExecuteInteractive() override;
    void Execute() override;

@@ -12,7 +12,7 @@

namespace Service::AM::Applets {

constexpr ResultCode ERR_USER_CANCELLED_SELECTION{ErrorModule::Account, 1};
constexpr Result ERR_USER_CANCELLED_SELECTION{ErrorModule::Account, 1};

ProfileSelect::ProfileSelect(Core::System& system_, LibraryAppletMode applet_mode_,
                             const Core::Frontend::ProfileSelectApplet& frontend_)
@@ -39,7 +39,7 @@ bool ProfileSelect::TransactionComplete() const {
    return complete;
}

ResultCode ProfileSelect::GetStatus() const {
Result ProfileSelect::GetStatus() const {
    return status;
}

@@ -39,7 +39,7 @@ public:
    void Initialize() override;

    bool TransactionComplete() const override;
    ResultCode GetStatus() const override;
    Result GetStatus() const override;
    void ExecuteInteractive() override;
    void Execute() override;

@@ -50,7 +50,7 @@ private:

    UserSelectionConfig config;
    bool complete = false;
    ResultCode status = ResultSuccess;
    Result status = ResultSuccess;
    std::vector<u8> final_data;
    Core::System& system;
};

@@ -80,7 +80,7 @@ bool SoftwareKeyboard::TransactionComplete() const {
    return complete;
}

ResultCode SoftwareKeyboard::GetStatus() const {
Result SoftwareKeyboard::GetStatus() const {
    return status;
}