Compare commits
9 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 228a381aed | |
| | c22c4f5d59 | |
| | 218d790bd6 | |
| | b8f3e5157b | |
| | 35d94dcb2b | |
| | 4a13f9eecd | |
| | ad99bbf5fe | |
| | b387a26f30 | |
| | 75c4aec8ab | |
CMakeLists.txt

@@ -131,7 +131,7 @@ add_definitions(-DBOOST_ASIO_DISABLE_CONCEPTS)
if (MSVC)
    add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/std:c++latest>)

    # boost still makes use of deprecated result_of.
    # cubeb and boost still make use of deprecated result_of.
    add_definitions(-D_HAS_DEPRECATED_RESULT_OF)
else()
    set(CMAKE_CXX_STANDARD 20)

@@ -370,7 +370,7 @@ if (ENABLE_SDL2)
    if (YUZU_USE_BUNDLED_SDL2)
        # Detect toolchain and platform
        if ((MSVC_VERSION GREATER_EQUAL 1920 AND MSVC_VERSION LESS 1940) AND ARCHITECTURE_x86_64)
            set(SDL2_VER "SDL2-2.0.18")
            set(SDL2_VER "SDL2-2.0.16")
        else()
            message(FATAL_ERROR "No bundled SDL2 binaries for your toolchain. Disable YUZU_USE_BUNDLED_SDL2 and provide your own.")
        endif()

@@ -390,7 +390,7 @@ if (ENABLE_SDL2)
    elseif (YUZU_USE_EXTERNAL_SDL2)
        message(STATUS "Using SDL2 from externals.")
    else()
        find_package(SDL2 2.0.18 REQUIRED)
        find_package(SDL2 2.0.16 REQUIRED)

        # Some installations don't set SDL2_LIBRARIES
        if("${SDL2_LIBRARIES}" STREQUAL "")
README.md

@@ -17,7 +17,7 @@ It is written in C++ with portability in mind, and we actively maintain builds f
        alt="Azure Mainline CI Build Status">
    </a>
    <a href="https://discord.com/invite/u77vRWY">
        <img src="https://img.shields.io/discord/398318088170242053?color=5865F2&label=yuzu&logo=discord&logoColor=white"
        <img src="https://img.shields.io/discord/398318088170242053?color=%237289DA&label=yuzu&logo=discord&logoColor=white"
            alt="Discord">
    </a>
</p>
4 externals/CMakeLists.txt vendored
@@ -44,6 +44,10 @@ target_include_directories(mbedtls PUBLIC ./mbedtls/include)
add_library(microprofile INTERFACE)
target_include_directories(microprofile INTERFACE ./microprofile)

# Unicorn
add_library(unicorn-headers INTERFACE)
target_include_directories(unicorn-headers INTERFACE ./unicorn/include)

# libusb
if (NOT LIBUSB_FOUND OR YUZU_USE_BUNDLED_LIBUSB)
    add_subdirectory(libusb)
2 externals/SDL vendored
Submodule externals/SDL updated: 2e9821423a...25f9ed87ff
2 externals/cubeb vendored
Submodule externals/cubeb updated: 75d9d125ee...1d66483ad2
18 externals/find-modules/FindUnicorn.cmake vendored Normal file

@@ -0,0 +1,18 @@
# Exports:
#  LIBUNICORN_FOUND
#  LIBUNICORN_INCLUDE_DIR
#  LIBUNICORN_LIBRARY

find_path(LIBUNICORN_INCLUDE_DIR
          unicorn/unicorn.h
          HINTS $ENV{UNICORNDIR}
          PATH_SUFFIXES include)

find_library(LIBUNICORN_LIBRARY
             NAMES unicorn
             HINTS $ENV{UNICORNDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(unicorn DEFAULT_MSG
                                  LIBUNICORN_LIBRARY LIBUNICORN_INCLUDE_DIR)
mark_as_advanced(LIBUNICORN_INCLUDE_DIR LIBUNICORN_LIBRARY)
@@ -24,7 +24,6 @@ if (MSVC)
    # /W3 - Level 3 warnings
    # /MP - Multi-threaded compilation
    # /Zi - Output debugging information
    # /Zm - Specifies the precompiled header memory allocation limit
    # /Zo - Enhanced debug info for optimized builds
    # /permissive- - Enables stricter C++ standards conformance checks
    # /EHsc - C++-only exception handling semantics

@@ -37,7 +36,6 @@ if (MSVC)
    add_compile_options(
        /MP
        /Zi
        /Zm200
        /Zo
        /permissive-
        /EHsc
@@ -1,7 +1,3 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>
#include "audio_core/delay_line.h"
@@ -1,7 +1,3 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
@@ -1,7 +1,3 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#ifdef _WIN32

#include <iterator>
@@ -114,7 +114,6 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
    SUB(Service, NGCT) \
    SUB(Service, NIFM) \
    SUB(Service, NIM) \
    SUB(Service, NOTIF) \
    SUB(Service, NPNS) \
    SUB(Service, NS) \
    SUB(Service, NVDRV) \
@@ -82,7 +82,6 @@ enum class Class : u8 {
    Service_NGCT, ///< The NGCT (No Good Content for Terra) service
    Service_NIFM, ///< The NIFM (Network interface) service
    Service_NIM, ///< The NIM service
    Service_NOTIF, ///< The NOTIF (Notification) service
    Service_NPNS, ///< The NPNS service
    Service_NS, ///< The NS services
    Service_NVDRV, ///< The NVDRV (Nvidia driver) service
@@ -179,15 +179,12 @@ add_library(core STATIC
    hle/kernel/k_client_port.h
    hle/kernel/k_client_session.cpp
    hle/kernel/k_client_session.h
    hle/kernel/k_code_memory.cpp
    hle/kernel/k_code_memory.h
    hle/kernel/k_condition_variable.cpp
    hle/kernel/k_condition_variable.h
    hle/kernel/k_event.cpp
    hle/kernel/k_event.h
    hle/kernel/k_handle_table.cpp
    hle/kernel/k_handle_table.h
    hle/kernel/k_light_condition_variable.cpp
    hle/kernel/k_light_condition_variable.h
    hle/kernel/k_light_lock.cpp
    hle/kernel/k_light_lock.h

@@ -240,7 +237,6 @@ add_library(core STATIC
    hle/kernel/k_system_control.h
    hle/kernel/k_thread.cpp
    hle/kernel/k_thread.h
    hle/kernel/k_thread_queue.cpp
    hle/kernel/k_thread_queue.h
    hle/kernel/k_trace.h
    hle/kernel/k_transfer_memory.cpp

@@ -412,8 +408,6 @@ add_library(core STATIC
    hle/service/glue/glue.h
    hle/service/glue/glue_manager.cpp
    hle/service/glue/glue_manager.h
    hle/service/glue/notif.cpp
    hle/service/glue/notif.h
    hle/service/grc/grc.cpp
    hle/service/grc/grc.h
    hle/service/hid/hid.cpp
@@ -521,6 +521,12 @@ const ARM_Interface& System::CurrentArmInterface() const {
    return impl->kernel.CurrentPhysicalCore().ArmInterface();
}

std::size_t System::CurrentCoreIndex() const {
    std::size_t core = impl->kernel.GetCurrentHostThreadID();
    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
    return core;
}

Kernel::PhysicalCore& System::CurrentPhysicalCore() {
    return impl->kernel.CurrentPhysicalCore();
}
@@ -208,6 +208,9 @@ public:
    /// Gets an ARM interface to the CPU core that is currently running
    [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;

    /// Gets the index of the currently running CPU core
    [[nodiscard]] std::size_t CurrentCoreIndex() const;

    /// Gets the physical core for the CPU core that is currently running
    [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
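The hunks above restore System::CurrentCoreIndex(), which derives the guest core index from the calling host thread's registered ID and asserts it names a real core. A minimal standalone sketch of that pattern follows; HostThreadRegistry, RegisterCoreThread, and this NUM_CPU_CORES constant are illustrative stand-ins, not yuzu names.

```cpp
#include <cassert>
#include <cstddef>
#include <mutex>
#include <thread>
#include <unordered_map>

constexpr std::size_t NUM_CPU_CORES = 4; // Stand-in for Core::Hardware::NUM_CPU_CORES.

class HostThreadRegistry {
public:
    // Each emulator host thread registers the guest core slot it drives.
    void RegisterCoreThread(std::size_t core_index) {
        std::scoped_lock lk{mutex};
        ids[std::this_thread::get_id()] = core_index;
    }

    // Mirrors the restored System::CurrentCoreIndex(): map the calling host
    // thread back to its core slot and assert the slot is a real CPU core.
    std::size_t CurrentCoreIndex() {
        std::scoped_lock lk{mutex};
        const std::size_t core = ids.at(std::this_thread::get_id()); // Throws if unregistered.
        assert(core < NUM_CPU_CORES);
        return core;
    }

private:
    std::mutex mutex;
    std::unordered_map<std::thread::id, std::size_t> ids;
};
```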
@@ -117,18 +117,17 @@ void CpuManager::MultiCoreRunGuestLoop() {
            physical_core = &kernel.CurrentPhysicalCore();
        }
        system.ExitDynarmicProfile();
        {
            Kernel::KScopedDisableDispatch dd(kernel);
            physical_core->ArmInterface().ClearExclusiveState();
        }
        physical_core->ArmInterface().ClearExclusiveState();
        kernel.CurrentScheduler()->RescheduleCurrentCore();
    }
}

void CpuManager::MultiCoreRunIdleThread() {
    auto& kernel = system.Kernel();
    while (true) {
        Kernel::KScopedDisableDispatch dd(kernel);
        kernel.CurrentPhysicalCore().Idle();
        auto& physical_core = kernel.CurrentPhysicalCore();
        physical_core.Idle();
        kernel.CurrentScheduler()->RescheduleCurrentCore();
    }
}

@@ -136,12 +135,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
    auto& kernel = system.Kernel();
    kernel.CurrentScheduler()->OnThreadStart();
    while (true) {
        auto core = kernel.CurrentPhysicalCoreIndex();
        auto core = kernel.GetCurrentHostThreadID();
        auto& scheduler = *kernel.CurrentScheduler();
        Kernel::KThread* current_thread = scheduler.GetCurrentThread();
        Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
        ASSERT(scheduler.ContextSwitchPending());
        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
        ASSERT(core == kernel.GetCurrentHostThreadID());
        scheduler.RescheduleCurrentCore();
    }
}

@@ -347,11 +346,15 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
        sc_sync_first_use = false;
    }

    // Emulation was stopped
    if (stop_token.stop_requested()) {
    // Abort if emulation was killed before the session really starts
    if (!system.IsPoweredOn()) {
        return;
    }

    if (stop_token.stop_requested()) {
        break;
    }

    auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
    data.is_running = true;
    Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
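The RunThread() hunk reworks how a stopped emulation session is detected on entry. For orientation, a compilable sketch of the std::jthread/std::stop_token shape in play; the loop body and `powered_on` flag are stand-ins for yuzu's guest loop and system.IsPoweredOn(), not the real code.

```cpp
#include <chrono>
#include <stop_token>
#include <thread>

// Check the stop token once on entry and bail out if emulation was killed
// before the session really started; then run until stop is requested.
void RunThreadSketch(std::stop_token stop_token, bool powered_on) {
    if (stop_token.stop_requested() && !powered_on) {
        return; // Emulation was stopped before the session began.
    }
    while (!stop_token.stop_requested()) {
        std::this_thread::sleep_for(std::chrono::milliseconds{1}); // Guest work stand-in.
    }
}

// A std::jthread passes its stop_token as the first argument automatically:
//     std::jthread worker{RunThreadSketch, true};
```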
@@ -145,16 +145,6 @@ NpadIdType HIDCore::GetFirstNpadId() const {
    return NpadIdType::Player1;
}

NpadIdType HIDCore::GetFirstDisconnectedNpadId() const {
    for (std::size_t player_index = 0; player_index < available_controllers; ++player_index) {
        const auto* const controller = GetEmulatedControllerByIndex(player_index);
        if (!controller->IsConnected()) {
            return controller->GetNpadIdType();
        }
    }
    return NpadIdType::Player1;
}

void HIDCore::EnableAllControllerConfiguration() {
    player_1->EnableConfiguration();
    player_2->EnableConfiguration();

@@ -45,9 +45,6 @@ public:
    /// Returns the first connected npad id
    NpadIdType GetFirstNpadId() const;

    /// Returns the first disconnected npad id
    NpadIdType GetFirstDisconnectedNpadId() const;

    /// Sets all emulated controllers into configuring mode.
    void EnableAllControllerConfiguration();
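GetFirstDisconnectedNpadId(), touched by the hunks above, is a plain first-match scan over the controller slots. A self-contained sketch of the same scan; Controller here is a hypothetical stand-in for yuzu's EmulatedController, and the slot count of 10 is illustrative.

```cpp
#include <array>

// Stand-in for yuzu's EmulatedController; only the fields the scan needs.
struct Controller {
    int npad_id;
    bool connected;
};

// Walk the slots in order, return the first free one, and default to the
// first slot (Player1) when everything is connected.
int GetFirstDisconnectedNpadId(const std::array<Controller, 10>& controllers) {
    for (const Controller& controller : controllers) {
        if (!controller.connected) {
            return controller.npad_id;
        }
    }
    return controllers.front().npad_id;
}
```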
@@ -9,7 +9,6 @@
#include "core/core.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"

@@ -33,7 +32,6 @@ namespace Kernel::Init {
    HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
    HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
    HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
    HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
    HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
    HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
@@ -8,7 +8,6 @@
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"

@@ -29,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {

bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
    auto& monitor = system.Monitor();
    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
    const auto current_core = system.CurrentCoreIndex();

    // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
    // TODO(bunnei): We should call CanAccessAtomic(..) here.

@@ -59,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu

bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
    auto& monitor = system.Monitor();
    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
    const auto current_core = system.CurrentCoreIndex();

    // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
    // TODO(bunnei): We should call CanAccessAtomic(..) here.
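Both helpers read the guest word through the exclusive monitor on the current core, test it, and conditionally write back. A simplified, single-process model of DecrementIfLessThan using std::atomic in place of the emulated monitor; note the real kernel helper's bool reports whether the memory access succeeded, while this sketch's bool reports whether the decrement happened.

```cpp
#include <atomic>
#include <cstdint>

// Atomically decrement the word only while it is below `value`.
bool DecrementIfLessThan(std::atomic<std::int32_t>& word, std::int32_t* out,
                         std::int32_t value) {
    std::int32_t expected = word.load(std::memory_order_relaxed);
    do {
        *out = expected; // Report the value we observed.
        if (expected >= value) {
            return false; // Not less than `value`; leave the word untouched.
        }
    } while (!word.compare_exchange_weak(expected, expected - 1,
                                         std::memory_order_seq_cst,
                                         std::memory_order_relaxed));
    *out = expected;
    return true;
}
```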
@@ -86,27 +85,6 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
    return true;
}

class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
public:
    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
        : KThreadQueue(kernel_), m_tree(t) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
        // If the thread is waiting on an address arbiter, remove it from the tree.
        if (waiting_thread->IsWaitingForAddressArbiter()) {
            m_tree->erase(m_tree->iterator_to(*waiting_thread));
            waiting_thread->ClearAddressArbiter();
        }

        // Invoke the base cancel wait handler.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }

private:
    KAddressArbiter::ThreadTree* m_tree;
};

} // namespace

ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {

@@ -118,14 +96,14 @@
        auto it = thread_tree.nfind_light({addr, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetAddressArbiterKey() == addr)) {
            // End the thread's wait.
            KThread* target_thread = std::addressof(*it);
            target_thread->EndWait(ResultSuccess);
            target_thread->SetSyncedObject(nullptr, ResultSuccess);

            ASSERT(target_thread->IsWaitingForAddressArbiter());
            target_thread->ClearAddressArbiter();
            target_thread->Wakeup();

            it = thread_tree.erase(it);
            target_thread->ClearAddressArbiter();
            ++num_waiters;
        }
    }

@@ -151,14 +129,14 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
        auto it = thread_tree.nfind_light({addr, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetAddressArbiterKey() == addr)) {
            // End the thread's wait.
            KThread* target_thread = std::addressof(*it);
            target_thread->EndWait(ResultSuccess);
            target_thread->SetSyncedObject(nullptr, ResultSuccess);

            ASSERT(target_thread->IsWaitingForAddressArbiter());
            target_thread->ClearAddressArbiter();
            target_thread->Wakeup();

            it = thread_tree.erase(it);
            target_thread->ClearAddressArbiter();
            ++num_waiters;
        }
    }

@@ -219,14 +197,14 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32

        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetAddressArbiterKey() == addr)) {
            // End the thread's wait.
            KThread* target_thread = std::addressof(*it);
            target_thread->EndWait(ResultSuccess);
            target_thread->SetSyncedObject(nullptr, ResultSuccess);

            ASSERT(target_thread->IsWaitingForAddressArbiter());
            target_thread->ClearAddressArbiter();
            target_thread->Wakeup();

            it = thread_tree.erase(it);
            target_thread->ClearAddressArbiter();
            ++num_waiters;
        }
    }
@@ -236,7 +214,6 @@
ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

    {
        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

@@ -247,6 +224,9 @@
            return ResultTerminationRequested;
        }

        // Set the synced object.
        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);

        // Read the value from userspace.
        s32 user_value{};
        bool succeeded{};

@@ -276,20 +256,31 @@
        // Set the arbiter.
        cur_thread->SetAddressArbiter(&thread_tree, addr);
        thread_tree.insert(*cur_thread);

        // Wait for the thread to finish.
        cur_thread->BeginWait(std::addressof(wait_queue));
        cur_thread->SetState(ThreadState::Waiting);
        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
    }

    // Cancel the timer wait.
    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

    // Remove from the address arbiter.
    {
        KScopedSchedulerLock sl(kernel);

        if (cur_thread->IsWaitingForAddressArbiter()) {
            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
            cur_thread->ClearAddressArbiter();
        }
    }

    // Get the result.
    return cur_thread->GetWaitResult();
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(&dummy);
}

ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

    {
        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

@@ -300,6 +291,9 @@
            return ResultTerminationRequested;
        }

        // Set the synced object.
        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);

        // Read the value from userspace.
        s32 user_value{};
        if (!ReadFromUser(system, &user_value, addr)) {

@@ -322,14 +316,26 @@
        // Set the arbiter.
        cur_thread->SetAddressArbiter(&thread_tree, addr);
        thread_tree.insert(*cur_thread);

        // Wait for the thread to finish.
        cur_thread->BeginWait(std::addressof(wait_queue));
        cur_thread->SetState(ThreadState::Waiting);
        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
    }

    // Cancel the timer wait.
    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

    // Remove from the address arbiter.
    {
        KScopedSchedulerLock sl(kernel);

        if (cur_thread->IsWaitingForAddressArbiter()) {
            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
            cur_thread->ClearAddressArbiter();
        }
    }

    // Get the result.
    return cur_thread->GetWaitResult();
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(&dummy);
}

} // namespace Kernel
@@ -170,10 +170,6 @@ public:
        }
    }

    const std::string& GetName() const {
        return name;
    }

private:
    void RegisterWithKernel();
    void UnregisterWithKernel();
@@ -6,7 +6,6 @@
#include "core/hle/kernel/k_class_token.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"

@@ -49,7 +48,7 @@ static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000);
static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000);
// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000);
// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000);
static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000);
// static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000);

// Ensure that the token hierarchy is correct.

@@ -80,7 +79,7 @@ static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAut
static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>));
// static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>));

// Ensure that the token hierarchy reflects the class hierarchy.
@@ -1,146 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"

namespace Kernel {

KCodeMemory::KCodeMemory(KernelCore& kernel_)
    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}

ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
    // Set members.
    m_owner = kernel.CurrentProcess();

    // Get the owner page table.
    auto& page_table = m_owner->PageTable();

    // Construct the page group.
    KMemoryInfo kBlockInfo = page_table.QueryInfo(addr);
    m_page_group = KPageLinkedList(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());

    // Lock the memory.
    R_TRY(page_table.LockForCodeMemory(addr, size))

    // Clear the memory.
    for (const auto& block : m_page_group.Nodes()) {
        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
    }

    // Set remaining tracking members.
    m_address = addr;
    m_is_initialized = true;
    m_is_owner_mapped = false;
    m_is_mapped = false;

    // We succeeded.
    return ResultSuccess;
}

void KCodeMemory::Finalize() {
    // Unlock.
    if (!m_is_mapped && !m_is_owner_mapped) {
        const size_t size = m_page_group.GetNumPages() * PageSize;
        m_owner->PageTable().UnlockForCodeMemory(m_address, size);
    }
}

ResultCode KCodeMemory::Map(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Ensure we're not already mapped.
    R_UNLESS(!m_is_mapped, ResultInvalidState);

    // Map the memory.
    R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

    // Mark ourselves as mapped.
    m_is_mapped = true;

    return ResultSuccess;
}

ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Unmap the memory.
    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
                                                          KMemoryState::CodeOut));

    // Mark ourselves as unmapped.
    m_is_mapped = false;

    return ResultSuccess;
}

ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Ensure we're not already mapped.
    R_UNLESS(!m_is_owner_mapped, ResultInvalidState);

    // Convert the memory permission.
    KMemoryPermission k_perm{};
    switch (perm) {
    case Svc::MemoryPermission::Read:
        k_perm = KMemoryPermission::UserRead;
        break;
    case Svc::MemoryPermission::ReadExecute:
        k_perm = KMemoryPermission::UserReadExecute;
        break;
    default:
        break;
    }

    // Map the memory.
    R_TRY(
        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));

    // Mark ourselves as mapped.
    m_is_owner_mapped = true;

    return ResultSuccess;
}

ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

    // Lock ourselves.
    KScopedLightLock lk(m_lock);

    // Unmap the memory.
    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));

    // Mark ourselves as unmapped.
    m_is_owner_mapped = false;

    return ResultSuccess;
}

} // namespace Kernel
@@ -1,66 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"

namespace Kernel {

enum class CodeMemoryOperation : u32 {
    Map = 0,
    MapToOwner = 1,
    Unmap = 2,
    UnmapFromOwner = 3,
};

class KCodeMemory final
    : public KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList> {
    KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);

public:
    explicit KCodeMemory(KernelCore& kernel_);

    ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
    void Finalize();

    ResultCode Map(VAddr address, size_t size);
    ResultCode Unmap(VAddr address, size_t size);
    ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
    ResultCode UnmapFromOwner(VAddr address, size_t size);

    bool IsInitialized() const {
        return m_is_initialized;
    }
    static void PostDestroy([[maybe_unused]] uintptr_t arg) {}

    KProcess* GetOwner() const {
        return m_owner;
    }
    VAddr GetSourceAddress() const {
        return m_address;
    }
    size_t GetSize() const {
        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
    }

private:
    KPageLinkedList m_page_group{};
    KProcess* m_owner{};
    VAddr m_address{};
    KLightLock m_lock;
    bool m_is_initialized{};
    bool m_is_owner_mapped{};
    bool m_is_mapped{};
};

} // namespace Kernel
@@ -11,7 +11,6 @@
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_results.h"

@@ -34,7 +33,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {

bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                      u32 new_orr_mask) {
    auto& monitor = system.Monitor();
    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
    const auto current_core = system.CurrentCoreIndex();

    // Load the value from the address.
    const auto expected = monitor.ExclusiveRead32(current_core, address);

@@ -58,48 +57,6 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
    return true;
}

class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
public:
    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
        : KThreadQueue(kernel_) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
        // Remove the thread as a waiter from its owner.
        waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);

        // Invoke the base cancel wait handler.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }
};

class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
private:
    KConditionVariable::ThreadTree* m_tree;

public:
    explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
        KernelCore& kernel_, KConditionVariable::ThreadTree* t)
        : KThreadQueue(kernel_), m_tree(t) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
        // Remove the thread as a waiter from its owner.
        if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(waiting_thread);
        }

        // If the thread is waiting on a condvar, remove it from the tree.
        if (waiting_thread->IsWaitingForConditionVariable()) {
            m_tree->erase(m_tree->iterator_to(*waiting_thread));
            waiting_thread->ClearConditionVariable();
        }

        // Invoke the base cancel wait handler.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }
};

} // namespace

KConditionVariable::KConditionVariable(Core::System& system_)
@@ -121,77 +78,84 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {

    // Determine the next tag.
    u32 next_value{};
    if (next_owner_thread != nullptr) {
    if (next_owner_thread) {
        next_value = next_owner_thread->GetAddressKeyValue();
        if (num_waiters > 1) {
            next_value |= Svc::HandleWaitMask;
        }

        // Write the value to userspace.
        ResultCode result{ResultSuccess};
        if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
            result = ResultSuccess;
        } else {
            result = ResultInvalidCurrentMemory;
        next_owner_thread->SetSyncedObject(nullptr, ResultSuccess);
        next_owner_thread->Wakeup();
        }

        // Write the value to userspace.
        if (!WriteToUser(system, addr, std::addressof(next_value))) {
            if (next_owner_thread) {
                next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
            }

        // Signal the next owner thread.
        next_owner_thread->EndWait(result);
        return result;
    } else {
        // Just write the value to userspace.
        R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
                 ResultInvalidCurrentMemory);

        return ResultSuccess;
            return ResultInvalidCurrentMemory;
        }
    }

    return ResultSuccess;
}

ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);

    // Wait for the address.
    KThread* owner_thread{};
    {
        KScopedSchedulerLock sl(kernel);
    KScopedAutoObject<KThread> owner_thread;
    ASSERT(owner_thread.IsNull());
    {
        KScopedSchedulerLock sl(kernel);
        cur_thread->SetSyncedObject(nullptr, ResultSuccess);

        // Check if the thread should terminate.
        R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
        // Check if the thread should terminate.
        R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);

        // Read the tag from userspace.
        u32 test_tag{};
        R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
        {
            // Read the tag from userspace.
            u32 test_tag{};
            R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
                     ResultInvalidCurrentMemory);

        // If the tag isn't the handle (with wait mask), we're done.
        R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
            // If the tag isn't the handle (with wait mask), we're done.
            R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess);

        // Get the lock owner thread.
        owner_thread = kernel.CurrentProcess()
                           ->GetHandleTable()
                           .GetObjectWithoutPseudoHandle<KThread>(handle)
                           .ReleasePointerUnsafe();
        R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
            // Get the lock owner thread.
            owner_thread =
                kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
                    handle);
            R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);

        // Update the lock.
        cur_thread->SetAddressKey(addr, value);
        owner_thread->AddWaiter(cur_thread);

        // Begin waiting.
        cur_thread->BeginWait(std::addressof(wait_queue));
        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
        cur_thread->SetMutexWaitAddressForDebugging(addr);
            // Update the lock.
            cur_thread->SetAddressKey(addr, value);
            owner_thread->AddWaiter(cur_thread);
            cur_thread->SetState(ThreadState::Waiting);
            cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
            cur_thread->SetMutexWaitAddressForDebugging(addr);
        }
    }
    ASSERT(owner_thread.IsNotNull());
    }

    // Close our reference to the owner thread, now that the wait is over.
    owner_thread->Close();
    // Remove the thread as a waiter from the lock owner.
    {
        KScopedSchedulerLock sl(kernel);
        KThread* owner_thread = cur_thread->GetLockOwner();
        if (owner_thread != nullptr) {
            owner_thread->RemoveWaiter(cur_thread);
        }
    }

    // Get the wait result.
    return cur_thread->GetWaitResult();
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}

void KConditionVariable::SignalImpl(KThread* thread) {
KThread* KConditionVariable::SignalImpl(KThread* thread) {
    // Check pre-conditions.
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

@@ -205,16 +169,18 @@
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        // TODO(bunnei): We should call CanAccessAtomic(..) here.
        can_access = true;
        if (can_access) [[likely]] {
        if (can_access) {
            UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
                             Svc::HandleWaitMask);
        }
    }

    if (can_access) [[likely]] {
    KThread* thread_to_close = nullptr;
    if (can_access) {
        if (prev_tag == Svc::InvalidHandle) {
            // If nobody held the lock previously, we're all good.
            thread->EndWait(ResultSuccess);
            thread->SetSyncedObject(nullptr, ResultSuccess);
            thread->Wakeup();
        } else {
            // Get the previous owner.
            KThread* owner_thread = kernel.CurrentProcess()

@@ -223,22 +189,33 @@
                                        static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
                                        .ReleasePointerUnsafe();

            if (owner_thread) [[likely]] {
            if (owner_thread) {
                // Add the thread as a waiter on the owner.
                owner_thread->AddWaiter(thread);
                owner_thread->Close();
                thread_to_close = owner_thread;
            } else {
                // The lock was tagged with a thread that doesn't exist.
                thread->EndWait(ResultInvalidState);
                thread->SetSyncedObject(nullptr, ResultInvalidState);
                thread->Wakeup();
            }
        }
    } else {
        // If the address wasn't accessible, note so.
        thread->EndWait(ResultInvalidCurrentMemory);
        thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
        thread->Wakeup();
    }

    return thread_to_close;
}

void KConditionVariable::Signal(u64 cv_key, s32 count) {
    // Prepare for signaling.
    constexpr int MaxThreads = 16;

    KLinkedList<KThread> thread_list{kernel};
    std::array<KThread*, MaxThreads> thread_array;
    s32 num_to_close{};

    // Perform signaling.
    s32 num_waiters{};
    {

@@ -249,7 +226,14 @@
               (it->GetConditionVariableKey() == cv_key)) {
            KThread* target_thread = std::addressof(*it);

            this->SignalImpl(target_thread);
            if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
                if (num_to_close < MaxThreads) {
                    thread_array[num_to_close++] = thread;
                } else {
                    thread_list.push_back(*thread);
                }
            }

            it = thread_tree.erase(it);
            target_thread->ClearConditionVariable();
            ++num_waiters;

@@ -261,16 +245,27 @@
            WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
        }
    }

    // Close threads in the array.
    for (auto i = 0; i < num_to_close; ++i) {
        thread_array[i]->Close();
    }

    // Close threads in the list.
    for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
        (*it).Close();
    }
}

ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
        kernel, std::addressof(thread_tree));
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

    {
        KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

        // Set the synced object.
        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);

        // Check that the thread isn't terminating.
        if (cur_thread->IsTerminationRequested()) {

@@ -295,7 +290,8 @@
            }

            // Wake up the next owner.
            next_owner_thread->EndWait(ResultSuccess);
            next_owner_thread->SetSyncedObject(nullptr, ResultSuccess);
            next_owner_thread->Wakeup();
        }

        // Write to the cv key.

@@ -312,21 +308,40 @@
            }
        }

        // If timeout is zero, time out.
        R_UNLESS(timeout != 0, ResultTimedOut);

        // Update condition variable tracking.
        cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
        thread_tree.insert(*cur_thread);
        {
            cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
            thread_tree.insert(*cur_thread);
        }

        // Begin waiting.
        cur_thread->BeginWait(std::addressof(wait_queue));
        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
        cur_thread->SetMutexWaitAddressForDebugging(addr);
        // If the timeout is non-zero, set the thread as waiting.
        if (timeout != 0) {
            cur_thread->SetState(ThreadState::Waiting);
            cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
            cur_thread->SetMutexWaitAddressForDebugging(addr);
        }
    }

    // Get the wait result.
    return cur_thread->GetWaitResult();
    // Cancel the timer wait.
    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

    // Remove from the condition variable.
    {
        KScopedSchedulerLock sl(kernel);

        if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(cur_thread);
        }

        if (cur_thread->IsWaitingForConditionVariable()) {
            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
            cur_thread->ClearConditionVariable();
        }
    }

    // Get the result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}

} // namespace Kernel
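The reworked Signal() cannot call Close() on the woken threads while the scheduler lock is held, so it collects them first: up to MaxThreads pointers go into a stack array and any overflow spills into a list. A generic sketch of that collection pattern, with std::vector standing in for the intrusive KLinkedList:

```cpp
#include <array>
#include <cstddef>
#include <vector>

// Fixed inline storage with heap spill: the common case does no allocation
// while a lock is held.
template <typename T, std::size_t MaxInline = 16>
class InlineOverflowSet {
public:
    void push(T* ptr) {
        if (num_inline < MaxInline) {
            inline_storage[num_inline++] = ptr;
        } else {
            overflow.push_back(ptr);
        }
    }

    // Visit every collected pointer, e.g. to Close() each thread after the
    // scheduler lock has been released.
    template <typename F>
    void for_each(F&& f) {
        for (std::size_t i = 0; i < num_inline; ++i) {
            f(inline_storage[i]);
        }
        for (T* ptr : overflow) {
            f(ptr);
        }
    }

private:
    std::array<T*, MaxInline> inline_storage{};
    std::size_t num_inline = 0;
    std::vector<T*> overflow;
};
```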
@@ -34,7 +34,7 @@ public:
    [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
    void SignalImpl(KThread* thread);
    [[nodiscard]] KThread* SignalImpl(KThread* thread);

    ThreadTree thread_tree;
@@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() {
    // Get the table and clear our record of it.
    u16 saved_table_size = 0;
    {
        KScopedDisableDispatch dd(kernel);
        KScopedSpinLock lk(m_lock);

        std::swap(m_table_size, saved_table_size);

@@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) {
    // Find the object and free the entry.
    KAutoObject* obj = nullptr;
    {
        KScopedDisableDispatch dd(kernel);
        KScopedSpinLock lk(m_lock);

        if (this->IsValidHandle(handle)) {

@@ -64,7 +62,6 @@ bool KHandleTable::Remove(Handle handle) {
}

ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);

    // Never exceed our capacity.

@@ -87,7 +84,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
}

ResultCode KHandleTable::Reserve(Handle* out_handle) {
    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);

    // Never exceed our capacity.

@@ -98,7 +94,6 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
}

void KHandleTable::Unreserve(Handle handle) {
    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);

    // Unpack the handle.

@@ -117,7 +112,6 @@ void KHandleTable::Unreserve(Handle handle) {
}

void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);

    // Unpack the handle.

@@ -68,7 +68,6 @@ public:
    template <typename T = KAutoObject>
    KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
        // Lock and look up in table.
        KScopedDisableDispatch dd(kernel);
        KScopedSpinLock lk(m_lock);

        if constexpr (std::is_same_v<T, KAutoObject>) {

@@ -123,7 +122,6 @@ public:
        size_t num_opened;
        {
            // Lock the table.
            KScopedDisableDispatch dd(kernel);
            KScopedSpinLock lk(m_lock);
            for (num_opened = 0; num_opened < num_handles; num_opened++) {
                // Get the current handle.
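Every method here drops a KScopedDisableDispatch guard that previously paired with the spin lock. The guard itself is a standard RAII idiom; a sketch follows, with Kernel as a hypothetical stand-in exposing DisableDispatch()/EnableDispatch() rather than yuzu's actual KernelCore interface.

```cpp
#include <atomic>

// Hypothetical stand-in for the kernel's dispatch-disable counter.
struct Kernel {
    std::atomic<int> dispatch_disable_count{0};
    void DisableDispatch() { ++dispatch_disable_count; }
    void EnableDispatch() { --dispatch_disable_count; }
};

// RAII guard in the style of the removed KScopedDisableDispatch: suppress
// rescheduling for exactly the scope that also holds the spin lock.
class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(Kernel& k) : kernel{k} { kernel.DisableDispatch(); }
    ~ScopedDisableDispatch() { kernel.EnableDispatch(); }
    ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
    ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;

private:
    Kernel& kernel;
};
```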
@@ -1,80 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

namespace {

class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
public:
    ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
                                              bool term)
        : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
        // Only process waits if we're allowed to.
        if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
            return;
        }

        // Remove the thread from the waiting thread from the light condition variable.
        m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

        // Invoke the base cancel wait handler.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }

private:
    KThread::WaiterList* m_wait_list;
    bool m_allow_terminating_thread;
};

} // namespace

void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
    // Create thread queue.
    KThread* owner = GetCurrentThreadPointer(kernel);

    ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
                                                         allow_terminating_thread);

    // Sleep the thread.
    {
        KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);

        if (!allow_terminating_thread && owner->IsTerminationRequested()) {
            lk.CancelSleep();
            return;
        }

        lock->Unlock();

        // Add the thread to the queue.
        wait_list.push_back(*owner);

        // Begin waiting.
        owner->BeginWait(std::addressof(wait_queue));
    }

    // Re-acquire the lock.
    lock->Lock();
}

void KLightConditionVariable::Broadcast() {
    KScopedSchedulerLock lk(kernel);

    // Signal all threads.
    for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
        it->EndWait(ResultSuccess);
    }
}

} // namespace Kernel
@@ -2,24 +2,72 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

class KernelCore;
class KLightLock;

class KLightConditionVariable {
public:
    explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}

    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
    void Broadcast();
    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true) {
        WaitImpl(lock, timeout, allow_terminating_thread);
    }

    void Broadcast() {
        KScopedSchedulerLock lk{kernel};

        // Signal all threads.
        for (auto& thread : wait_list) {
            thread.SetState(ThreadState::Runnable);
        }
    }

private:
    void WaitImpl(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
        KThread* owner = GetCurrentThreadPointer(kernel);

        // Sleep the thread.
        {
            KScopedSchedulerLockAndSleep lk{kernel, owner, timeout};

            if (!allow_terminating_thread && owner->IsTerminationRequested()) {
                lk.CancelSleep();
                return;
            }

            lock->Unlock();

            // Set the thread as waiting.
            GetCurrentThread(kernel).SetState(ThreadState::Waiting);

            // Add the thread to the queue.
            wait_list.push_back(GetCurrentThread(kernel));
        }

        // Remove the thread from the wait list.
        {
            KScopedSchedulerLock sl{kernel};

            wait_list.erase(wait_list.iterator_to(GetCurrentThread(kernel)));
        }

        // Cancel the task that the sleep setup.
        kernel.TimeManager().UnscheduleTimeEvent(owner);

        // Re-acquire the lock.
        lock->Lock();
    }

    KernelCore& kernel;
    KThread::WaiterList wait_list{};
};
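The inlined WaitImpl() follows the classic condition-variable contract: release the lock, sleep, wake, reacquire. For comparison, the same contract expressed with standard C++ primitives; this is an analogy, not the kernel's implementation.

```cpp
#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool ready = false;

// Waiter: atomically releases the mutex while sleeping and reacquires it on
// wake -- the same contract WaitImpl() implements against KLightLock.
void Waiter() {
    std::unique_lock lk{m};
    cv.wait(lk, [] { return ready; });
}

// Broadcaster: flips the predicate under the lock, then wakes everyone, as
// Broadcast() does for the wait list.
void Broadcaster() {
    {
        std::scoped_lock lk{m};
        ready = true;
    }
    cv.notify_all();
}
```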
@@ -5,59 +5,44 @@
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"

namespace Kernel {

namespace {

class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
    explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
        // Remove the thread as a waiter from its owner.
        if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(waiting_thread);
        }

        // Invoke the base cancel wait handler.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }
};

} // namespace

void KLightLock::Lock() {
    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
    const uintptr_t cur_thread_tag = (cur_thread | 1);

    while (true) {
        uintptr_t old_tag = tag.load(std::memory_order_relaxed);

        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
                                          std::memory_order_acquire)) {
            if ((old_tag | 1) == cur_thread_tag) {
                return;
            }
        }

        if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
        if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
            break;
        }

        LockSlowPath(old_tag | 1, cur_thread);
    }
}

void KLightLock::Unlock() {
    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));

    uintptr_t expected = cur_thread;
    if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
        this->UnlockSlowPath(cur_thread);
    }
    do {
        if (expected != cur_thread) {
            return UnlockSlowPath(cur_thread);
        }
    } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
}

bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
    KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
    ThreadQueueImplForKLightLock wait_queue(kernel);

    // Pend the current thread waiting on the owner thread.
    {

@@ -65,7 +50,7 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {

        // Ensure we actually have locking to do.
        if (tag.load(std::memory_order_relaxed) != _owner) {
            return false;
            return;
        }

        // Add the current thread as a waiter on the owner.

@@ -73,15 +58,22 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
        cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
        owner_thread->AddWaiter(cur_thread);

        // Begin waiting to hold the lock.
        cur_thread->BeginWait(std::addressof(wait_queue));
        // Set thread states.
        cur_thread->SetState(ThreadState::Waiting);

        if (owner_thread->IsSuspended()) {
            owner_thread->ContinueIfHasKernelWaiters();
        }
    }

    return true;
    // We're no longer waiting on the lock owner.
    {
        KScopedSchedulerLock sl{kernel};

        if (KThread* owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) {
            owner_thread->RemoveWaiter(cur_thread);
        }
    }
}

void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {

@@ -89,20 +81,22 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {

    // Unlock.
    {
        KScopedSchedulerLock sl(kernel);
        KScopedSchedulerLock sl{kernel};

        // Get the next owner.
        s32 num_waiters;
        s32 num_waiters = 0;
        KThread* next_owner = owner_thread->RemoveWaiterByKey(
            std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));

        // Pass the lock to the next owner.
        uintptr_t next_tag = 0;
        if (next_owner != nullptr) {
            next_tag =
                reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
            next_tag = reinterpret_cast<uintptr_t>(next_owner);
            if (num_waiters > 1) {
                next_tag |= 0x1;
            }

            next_owner->EndWait(ResultSuccess);
            next_owner->SetState(ThreadState::Runnable);

            if (next_owner->IsSuspended()) {
                next_owner->ContinueIfHasKernelWaiters();

@@ -116,7 +110,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
        }

        // Write the new tag value.
        tag.store(next_tag, std::memory_order_release);
        tag.store(next_tag);
    }
}
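KLightLock packs the owner KThread pointer and a "has waiters" flag into one atomic word: bit 0 set means contended. A standalone sketch of the reverted tag protocol with the slow paths stubbed out; LightLockSketch and the plain-integer thread identities are illustrative only.

```cpp
#include <atomic>
#include <cstdint>

class LightLockSketch {
public:
    void Lock(std::uintptr_t cur_thread) {
        const std::uintptr_t cur_thread_tag = cur_thread | 1;
        while (true) {
            std::uintptr_t old_tag = tag.load(std::memory_order_relaxed);
            // Install ourselves if free, otherwise set the "has waiters" bit.
            while (!tag.compare_exchange_weak(
                old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
                std::memory_order_acquire)) {
                if ((old_tag | 1) == cur_thread_tag) {
                    return; // The tag already names us.
                }
            }
            if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
                break; // Uncontended acquire.
            }
            LockSlowPath(old_tag | 1, cur_thread); // Block on the owner, then retry.
        }
    }

    void Unlock(std::uintptr_t cur_thread) {
        std::uintptr_t expected = cur_thread;
        if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
            UnlockSlowPath(cur_thread); // Waiter bit set: hand off the lock.
        }
    }

private:
    void LockSlowPath(std::uintptr_t, std::uintptr_t) { /* sleep until woken */ }
    void UnlockSlowPath(std::uintptr_t) { /* wake the next waiter */ }
    std::atomic<std::uintptr_t> tag{0};
};
```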
@@ -20,7 +20,7 @@ public:

    void Unlock();

    bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
    void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);

    void UnlockSlowPath(uintptr_t cur_thread);
@@ -131,26 +131,6 @@ enum class KMemoryPermission : u8 {

UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),

KernelShift = 3,

KernelRead = Read << KernelShift,
KernelWrite = Write << KernelShift,
KernelExecute = Execute << KernelShift,

NotMapped = (1 << (2 * KernelShift)),

KernelReadWrite = KernelRead | KernelWrite,
KernelReadExecute = KernelRead | KernelExecute,

UserRead = Read | KernelRead,
UserWrite = Write | KernelWrite,
UserExecute = Execute,

UserReadWrite = UserRead | UserWrite,
UserReadExecute = UserRead | UserExecute,

IpcLockChangeMask = NotMapped | UserReadWrite
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);

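The block removed here packed two views of a page's permissions into one byte: the user bits in positions 0-2 and the kernel's mirrored bits shifted up by KernelShift, plus a NotMapped flag above both. A small sketch of that layout (the enum name is hypothetical; values mirror the removed entries, with Read/Write/Execute assumed to be bits 0-2):

    // Layout sketch for the removed KMemoryPermission entries (illustrative).
    #include <cstdint>

    enum KMemoryPermissionSketch : uint8_t {
        Read = 1 << 0,
        Write = 1 << 1,
        Execute = 1 << 2,

        KernelShift = 3,
        KernelRead = Read << KernelShift,   // 0b0000'1000
        KernelWrite = Write << KernelShift, // 0b0001'0000
        NotMapped = 1 << (2 * KernelShift), // 0b0100'0000

        // A user-writable page is kernel-writable too; note UserExecute in the
        // removed code is Execute alone, so user code is never kernel-executable.
        UserWrite = Write | KernelWrite, // 0b0001'0010
    };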
@@ -27,10 +27,6 @@ public:
return num_pages;
}

constexpr std::size_t GetSize() const {
return GetNumPages() * PageSize;
}

private:
u64 addr{};
std::size_t num_pages{};

@@ -368,33 +368,6 @@ ResultCode KPageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, st
return ResultSuccess;
}

ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
std::lock_guard lock{page_table_lock};

const std::size_t num_pages{size / PageSize};

// Check that the memory is mapped in the destination process.
size_t num_allocator_blocks;
R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
KMemoryAttribute::None));

// Check that the memory is mapped in the source process.
R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));

CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));

// Apply the memory block update.
block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None);

return ResultSuccess;
}
void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) {
auto node{page_linked_list.Nodes().begin()};
PAddr map_addr{node->GetAddress()};
@@ -969,60 +942,6 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
return ResultSuccess;
}

ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
std::lock_guard lock{page_table_lock};

KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;

KMemoryPermission old_perm{};

if (const ResultCode result{CheckMemoryState(
nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::Mask,
KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
result.IsError()) {
return result;
}

new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;

block_manager->UpdateLock(
addr, size / PageSize,
[](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
block->ShareToDevice(permission);
},
new_perm);

return ResultSuccess;
}

ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
std::lock_guard lock{page_table_lock};

KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;

KMemoryPermission old_perm{};

if (const ResultCode result{CheckMemoryState(
nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::All, KMemoryAttribute::Locked)};
result.IsError()) {
return result;
}

new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;

block_manager->UpdateLock(
addr, size / PageSize,
[](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
block->UnshareToDevice(permission);
},
new_perm);

return ResultSuccess;
}

ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
block_manager = std::make_unique<KMemoryBlockManager>(start, end);

@@ -1312,42 +1231,4 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
return ResultSuccess;
}

ResultCode KPageTable::CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const {
// Get information about the first block.
const VAddr last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)};
KMemoryInfo info = it->GetMemoryInfo();

// If the start address isn't aligned, we need a block.
const size_t blocks_for_start_align =
(Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;

while (true) {
// Validate against the provided masks.
R_TRY(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));

// Break once we're done.
if (last_addr <= info.GetLastAddress()) {
break;
}

// Advance our iterator.
it++;
info = it->GetMemoryInfo();
}

// If the end address isn't aligned, we need a block.
const size_t blocks_for_end_align =
(Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;

if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
}

return ResultSuccess;
}

} // namespace Kernel

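The removed CheckMemoryState overload walks every memory block overlapping [addr, addr + size), validates each against the state/permission/attribute masks, and reports how many extra blocks a later update would need: one if the aligned start lands inside an existing block, one if the aligned end does. The counting idea in isolation (a sketch over a sorted vector of block start addresses, not the KMemoryBlockManager API; assumes a power-of-two page size):

    // Sketch: count the block splits a range update would cause, as the
    // removed CheckMemoryState overload does. Block layout is hypothetical.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // block_starts: sorted start addresses of contiguous blocks.
    std::size_t BlocksNeeded(const std::vector<uint64_t>& block_starts,
                             uint64_t addr, uint64_t size, uint64_t page_size) {
        const uint64_t begin = addr & ~(page_size - 1); // AlignDown(addr)
        const uint64_t end =
            (addr + size + page_size - 1) & ~(page_size - 1); // AlignUp(addr + size)
        // A boundary that is not already a block boundary forces a split.
        const bool begin_splits =
            !std::binary_search(block_starts.begin(), block_starts.end(), begin);
        const bool end_splits =
            !std::binary_search(block_starts.begin(), block_starts.end(), end);
        return static_cast<std::size_t>(begin_splits) +
               static_cast<std::size_t>(end_splits);
    }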
@@ -33,8 +33,6 @@ public:
KMemoryPermission perm);
ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
VAddr src_addr);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode UnmapMemory(VAddr addr, std::size_t size);
@@ -57,8 +55,6 @@ public:
KMemoryPermission perm, PAddr map_addr = 0);
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);

Common::PageTable& PageTableImpl() {
return page_table_impl;
@@ -119,10 +115,6 @@ private:
return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
perm, attr_mask, attr, ignore_attr);
}
ResultCode CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;

std::recursive_mutex page_table_lock;
std::unique_ptr<KMemoryBlockManager> block_manager;

@@ -60,7 +60,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle;
thread->GetContext64().cpu_registers[1] = thread_handle;
thread->DisableDispatch();

auto& kernel = system.Kernel();
// Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
@@ -228,15 +227,12 @@ void KProcess::PinCurrentThread() {
const s32 core_id = GetCurrentCoreId(kernel);
KThread* cur_thread = GetCurrentThreadPointer(kernel);

// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
// Pin it.
PinThread(core_id, cur_thread);
cur_thread->Pin();
// Pin it.
PinThread(core_id, cur_thread);
cur_thread->Pin();

// An update is needed.
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
// An update is needed.
KScheduler::SetSchedulerUpdateNeeded(kernel);
}

void KProcess::UnpinCurrentThread() {
@@ -254,20 +250,6 @@ void KProcess::UnpinCurrentThread() {
KScheduler::SetSchedulerUpdateNeeded(kernel);
}

void KProcess::UnpinThread(KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());

// Get the thread's core id.
const auto core_id = thread->GetActiveCore();

// Unpin it.
UnpinThread(core_id, thread);
thread->Unpin();

// An update is needed.
KScheduler::SetSchedulerUpdateNeeded(kernel);
}

ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
[[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.

@@ -347,7 +347,6 @@ public:

void PinCurrentThread();
void UnpinCurrentThread();
void UnpinThread(KThread* thread);

KLightLock& GetStateLock() {
return state_lock;

@@ -240,8 +240,8 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3

// If the thread is runnable, we want to change its priority in the queue.
if (thread->GetRawState() == ThreadState::Runnable) {
GetPriorityQueue(kernel).ChangePriority(old_priority,
thread == kernel.GetCurrentEmuThread(), thread);
GetPriorityQueue(kernel).ChangePriority(
old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
}
@@ -360,7 +360,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
}

bool KScheduler::CanSchedule(KernelCore& kernel) {
return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
}

bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
@@ -376,30 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
}

void KScheduler::DisableScheduling(KernelCore& kernel) {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
scheduler->GetCurrentThread()->DisableDispatch();
}

ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
GetCurrentThreadPointer(kernel)->DisableDispatch();
}

void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}

auto* current_thread = GetCurrentThreadPointer(kernel);

ASSERT(current_thread->GetDisableDispatchCount() >= 1);

if (current_thread->GetDisableDispatchCount() > 1) {
current_thread->EnableDispatch();
} else {
RescheduleCores(kernel, cores_needing_scheduling);
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
scheduler->GetCurrentThread()->EnableDispatch();
}
}
RescheduleCores(kernel, cores_needing_scheduling);
}

u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -627,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
state.highest_priority_thread = nullptr;
}

void KScheduler::Finalize() {
KScheduler::~KScheduler() {
if (idle_thread) {
idle_thread->Close();
idle_thread = nullptr;
}
}

KScheduler::~KScheduler() {
ASSERT(!idle_thread);
}

KThread* KScheduler::GetCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
@@ -656,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() {
if (phys_core.IsInterrupted()) {
phys_core.ClearInterrupt();
}

guard.Lock();
if (state.needs_scheduling.load()) {
Schedule();
} else {
GetCurrentThread()->EnableDispatch();
guard.Unlock();
}
}
@@ -671,33 +655,26 @@ void KScheduler::OnThreadStart() {
}

void KScheduler::Unload(KThread* thread) {
ASSERT(thread);

LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");

if (thread->IsCallingSvc()) {
thread->ClearIsCallingSvc();
if (thread) {
if (thread->IsCallingSvc()) {
thread->ClearIsCallingSvc();
}
if (!thread->IsTerminationRequested()) {
prev_thread = thread;

Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
} else {
prev_thread = nullptr;
}
thread->context_guard.Unlock();
}

auto& physical_core = system.Kernel().PhysicalCore(core_id);
if (!physical_core.IsInitialized()) {
return;
}

Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();

if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
prev_thread = thread;
} else {
prev_thread = nullptr;
}

thread->context_guard.Unlock();
}

void KScheduler::Reload(KThread* thread) {
@@ -706,6 +683,11 @@ void KScheduler::Reload(KThread* thread) {
if (thread) {
ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");

auto* const thread_owner_process = thread->GetOwnerProcess();
if (thread_owner_process != nullptr) {
system.Kernel().MakeCurrentProcess(thread_owner_process);
}

Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
@@ -723,7 +705,7 @@ void KScheduler::SwitchContextStep2() {
}

void KScheduler::ScheduleImpl() {
KThread* previous_thread = GetCurrentThread();
KThread* previous_thread = current_thread.load();
KThread* next_thread = state.highest_priority_thread;

state.needs_scheduling = false;
@@ -735,15 +717,10 @@ void KScheduler::ScheduleImpl() {

// If we're not actually switching thread, there's nothing to do.
if (next_thread == current_thread.load()) {
previous_thread->EnableDispatch();
guard.Unlock();
return;
}

if (next_thread->GetCurrentCore() != core_id) {
next_thread->SetCurrentCore(core_id);
}

current_thread.store(next_thread);

KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -754,7 +731,11 @@ void KScheduler::ScheduleImpl() {
Unload(previous_thread);

std::shared_ptr<Common::Fiber>* old_context;
old_context = &previous_thread->GetHostContext();
if (previous_thread != nullptr) {
old_context = &previous_thread->GetHostContext();
} else {
old_context = &idle_thread->GetHostContext();
}
guard.Unlock();

Common::Fiber::YieldTo(*old_context, *switch_fiber);

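Several of the scheduler hunks above revolve around a per-thread disable_count: dispatch is legal only while the current thread's count is at most one, DisableScheduling increments it, and the outermost EnableScheduling is what lets pending rescheduling actually run. The invariant in miniature (a sketch with stand-in types; yuzu keeps the count in the thread's stack parameters):

    // Sketch of the disable-dispatch counting these hunks manipulate.
    #include <cassert>

    struct ThreadSketch {
        int disable_count = 0;
    };

    inline bool CanSchedule(const ThreadSketch& t) {
        return t.disable_count <= 1; // Dispatch is legal at most one level deep.
    }

    inline void DisableScheduling(ThreadSketch& t) {
        assert(t.disable_count >= 0);
        ++t.disable_count; // No reschedule may run until re-enabled.
    }

    // Returns true when this was the outermost enable, i.e. when the caller
    // should kick the cores that still need scheduling.
    inline bool EnableScheduling(ThreadSketch& t) {
        assert(t.disable_count >= 1);
        return --t.disable_count == 0;
    }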
@@ -33,8 +33,6 @@ public:
explicit KScheduler(Core::System& system_, s32 core_id_);
~KScheduler();

void Finalize();

/// Reschedules to the next available thread (call after current thread is suspended)
void RescheduleCurrentCore();

@@ -23,11 +23,6 @@ public:
}

void Lock() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}

if (IsLockedByCurrentThread()) {
// If we already own the lock, we can just increment the count.
ASSERT(lock_count > 0);
@@ -48,11 +43,6 @@ public:
}

void Unlock() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}

ASSERT(IsLockedByCurrentThread());
ASSERT(lock_count > 0);

@@ -8,7 +8,6 @@
#pragma once

#include "common/common_types.h"
#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/time_manager.h"

@@ -175,7 +175,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
{
KScopedSchedulerLock lock(kernel);
if (!context.IsThreadWaiting()) {
context.GetThread().EndWait(result);
context.GetThread().Wakeup();
context.GetThread().SetSyncedObject(nullptr, result);
}
}

@@ -8,66 +8,11 @@
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

namespace {

class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
public:
ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
KSynchronizationObject::ThreadListNode* n, s32 c)
: KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}

void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
ResultCode wait_result) override {
// Determine the sync index, and unlink all nodes.
s32 sync_index = -1;
for (auto i = 0; i < m_count; ++i) {
// Check if this is the signaled object.
if (m_objects[i] == signaled_object && sync_index == -1) {
sync_index = i;
}

// Unlink the current node from the current object.
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
}

// Set the waiting thread's sync index.
waiting_thread->SetSyncedIndex(sync_index);

// Set the waiting thread as not cancellable.
waiting_thread->ClearCancellable();

// Invoke the base end wait handler.
KThreadQueue::EndWait(waiting_thread, wait_result);
}

void CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task) override {
// Remove all nodes from our list.
for (auto i = 0; i < m_count; ++i) {
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
}

// Set the waiting thread as not cancellable.
waiting_thread->ClearCancellable();

// Invoke the base cancel wait handler.
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}

private:
KSynchronizationObject** m_objects;
KSynchronizationObject::ThreadListNode* m_nodes;
s32 m_count;
};

} // namespace

void KSynchronizationObject::Finalize() {
this->OnFinalizeSynchronizationObject();
KAutoObject::Finalize();
@@ -80,19 +25,11 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
std::vector<ThreadListNode> thread_nodes(num_objects);

// Prepare for wait.
KThread* thread = GetCurrentThreadPointer(kernel_ctx);
ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
thread_nodes.data(), num_objects);
KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread();

{
// Setup the scheduling lock and sleep.
KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout);

// Check if the thread should terminate.
if (thread->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
}
KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout};

// Check if any of the objects are already signaled.
for (auto i = 0; i < num_objects; ++i) {
@@ -111,6 +48,12 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
return ResultTimedOut;
}

// Check if the thread should terminate.
if (thread->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
}

// Check if waiting was canceled.
if (thread->IsWaitCancelled()) {
slp.CancelSleep();
@@ -123,25 +66,73 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
thread_nodes[i].thread = thread;
thread_nodes[i].next = nullptr;

objects[i]->LinkNode(std::addressof(thread_nodes[i]));
if (objects[i]->thread_list_tail == nullptr) {
objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
} else {
objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
}

objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
}

// Mark the thread as cancellable.
// For debugging only
thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});

// Mark the thread as waiting.
thread->SetCancellable();

// Clear the thread's synced index.
thread->SetSyncedIndex(-1);

// Wait for an object to be signaled.
thread->BeginWait(std::addressof(wait_queue));
thread->SetSyncedObject(nullptr, ResultTimedOut);
thread->SetState(ThreadState::Waiting);
thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
}

// Set the output index.
*out_index = thread->GetSyncedIndex();
// The lock/sleep is done, so we should be able to get our result.

// Thread is no longer cancellable.
thread->ClearCancellable();

// For debugging only
thread->SetWaitObjectsForDebugging({});

// Cancel the timer as needed.
kernel_ctx.TimeManager().UnscheduleTimeEvent(thread);

// Get the wait result.
return thread->GetWaitResult();
ResultCode wait_result{ResultSuccess};
s32 sync_index = -1;
{
KScopedSchedulerLock lock(kernel_ctx);
KSynchronizationObject* synced_obj;
wait_result = thread->GetWaitResult(std::addressof(synced_obj));

for (auto i = 0; i < num_objects; ++i) {
// Unlink the object from the list.
ThreadListNode* prev_ptr =
reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
ThreadListNode* prev_val = nullptr;
ThreadListNode *prev, *tail_prev;

do {
prev = prev_ptr;
prev_ptr = prev_ptr->next;
tail_prev = prev_val;
prev_val = prev_ptr;
} while (prev_ptr != std::addressof(thread_nodes[i]));

if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
objects[i]->thread_list_tail = tail_prev;
}

prev->next = thread_nodes[i].next;

if (objects[i] == synced_obj) {
sync_index = i;
}
}
}

// Set output.
*out_index = sync_index;
return wait_result;
}

KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)

@@ -150,7 +141,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
KSynchronizationObject::~KSynchronizationObject() = default;

void KSynchronizationObject::NotifyAvailable(ResultCode result) {
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock lock(kernel);

// If we're not signaled, we've nothing to notify.
if (!this->IsSignaled()) {
@@ -159,7 +150,11 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {

// Iterate over each thread.
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
cur_node->thread->NotifyAvailable(this, result);
KThread* thread = cur_node->thread;
if (thread->GetState() == ThreadState::Waiting) {
thread->SetSyncedObject(this, result);
thread->SetState(ThreadState::Runnable);
}
}
}

@@ -35,38 +35,6 @@ public:

[[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;

void LinkNode(ThreadListNode* node_) {
// Link the node to the list.
if (thread_list_tail == nullptr) {
thread_list_head = node_;
} else {
thread_list_tail->next = node_;
}

thread_list_tail = node_;
}

void UnlinkNode(ThreadListNode* node_) {
// Unlink the node from the list.
ThreadListNode* prev_ptr =
reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
ThreadListNode* prev_val = nullptr;
ThreadListNode *prev, *tail_prev;

do {
prev = prev_ptr;
prev_ptr = prev_ptr->next;
tail_prev = prev_val;
prev_val = prev_ptr;
} while (prev_ptr != node_);

if (thread_list_tail == node_) {
thread_list_tail = tail_prev;
}

prev->next = node_->next;
}

protected:
explicit KSynchronizationObject(KernelCore& kernel);
~KSynchronizationObject() override;

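The removed LinkNode/UnlinkNode helpers (and the open-coded copies in Wait above) maintain a singly linked waiter list with head and tail pointers, where unlinking scans from a pseudo-head node so no back pointers are needed. A simplified standalone form of the same walk (node layout is reduced; the pseudo-head cast relies on next being the first member, mirroring the original reinterpret_cast):

    // Sketch of the head/tail singly linked list used for waiter nodes.
    struct Node {
        Node* next = nullptr;
    };

    struct List {
        Node* head = nullptr;
        Node* tail = nullptr;

        void Link(Node* n) {
            // Append at the tail in O(1).
            if (tail == nullptr) {
                head = n;
            } else {
                tail->next = n;
            }
            tail = n;
        }

        void Unlink(Node* n) {
            // Treat the head pointer slot as a node so the head case needs no
            // special handling; then walk until we sit just before n.
            Node* prev = reinterpret_cast<Node*>(&head);
            while (prev->next != n) {
                prev = prev->next;
            }
            if (tail == n) {
                tail = (prev == reinterpret_cast<Node*>(&head)) ? nullptr : prev;
            }
            prev->next = n->next; // Splice n out.
        }
    };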
@@ -13,9 +13,6 @@
#include "common/common_types.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "common/thread_queue_list.h"
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
@@ -59,34 +56,6 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,

namespace Kernel {

namespace {

class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
public:
explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
: KThreadQueueWithoutEndWait(kernel_) {}
};

class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
public:
explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
: KThreadQueue(kernel_), m_wait_list(wl) {}

void CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task) override {
// Remove the thread from the wait list.
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

// Invoke the base cancel wait handler.
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}

private:
KThread::WaiterList* m_wait_list;
};

} // namespace

KThread::KThread(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
KThread::~KThread() = default;

@@ -113,8 +82,6 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
[[fallthrough]];
case ThreadType::HighPriority:
[[fallthrough]];
case ThreadType::Dummy:
[[fallthrough]];
case ThreadType::User:
ASSERT(((owner == nullptr) ||
(owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
@@ -160,8 +127,11 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
priority = prio;
base_priority = prio;

// Set sync object and waiting lock to null.
synced_object = nullptr;

// Initialize sleeping queue.
wait_queue = nullptr;
sleeping_queue = nullptr;

// Set suspend flags.
suspend_request_flags = 0;
@@ -214,7 +184,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
// Setup the stack parameters.
StackParameters& sp = GetStackParameters();
sp.cur_thread = this;
sp.disable_count = 0;
sp.disable_count = 1;
SetInExceptionHandler();

// Set thread ID.
@@ -241,16 +211,15 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));

// Initialize emulation parameters.
// Initialize host context.
thread->host_context =
std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
thread->is_single_core = !Settings::values.use_multi_core.GetValue();

return ResultSuccess;
}

ResultCode KThread::InitializeDummyThread(KThread* thread) {
return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
}

ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {

@@ -304,14 +273,11 @@ void KThread::Finalize() {

auto it = waiter_list.begin();
while (it != waiter_list.end()) {
// Clear the lock owner
// The thread shouldn't be a kernel waiter.
it->SetLockOwner(nullptr);

// Erase the waiter from our list.
it->SetSyncedObject(nullptr, ResultInvalidState);
it->Wakeup();
it = waiter_list.erase(it);

// Cancel the thread's wait.
it->CancelWait(ResultInvalidState, true);
}
}

@@ -328,12 +294,15 @@ bool KThread::IsSignaled() const {
return signaled;
}

void KThread::OnTimer() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
void KThread::Wakeup() {
KScopedSchedulerLock sl{kernel};

// If we're waiting, cancel the wait.
if (GetState() == ThreadState::Waiting) {
wait_queue->CancelWait(this, ResultTimedOut, false);
if (sleeping_queue != nullptr) {
sleeping_queue->WakeupThread(this);
} else {
SetState(ThreadState::Runnable);
}
}
}

@@ -358,7 +327,7 @@ void KThread::StartTermination() {

// Signal.
signaled = true;
KSynchronizationObject::NotifyAvailable();
NotifyAvailable();

// Clear previous thread in KScheduler.
KScheduler::ClearPreviousThread(kernel, this);

@@ -506,32 +475,30 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
return ResultSuccess;
}

ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
ASSERT(parent != nullptr);
ASSERT(v_affinity_mask != 0);
KScopedLightLock lk(activity_pause_lock);
KScopedLightLock lk{activity_pause_lock};

// Set the core mask.
u64 p_affinity_mask = 0;
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl{kernel};
ASSERT(num_core_migration_disables >= 0);

// If we're updating, set our ideal virtual core.
if (core_id_ != Svc::IdealCoreNoUpdate) {
virtual_ideal_core_id = core_id_;
} else {
// Preserve our ideal core id.
core_id_ = virtual_ideal_core_id;
R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
// If the core id is no-update magic, preserve the ideal core id.
if (cpu_core_id == Svc::IdealCoreNoUpdate) {
cpu_core_id = virtual_ideal_core_id;
R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
}

// Set our affinity mask.
// Set the virtual core/affinity mask.
virtual_ideal_core_id = cpu_core_id;
virtual_affinity_mask = v_affinity_mask;

// Translate the virtual core to a physical core.
if (core_id_ >= 0) {
core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
if (cpu_core_id >= 0) {
cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id];
}

// Translate the virtual affinity mask to a physical one.
@@ -546,7 +513,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
const KAffinityMask old_mask = physical_affinity_mask;

// Set our new ideals.
physical_ideal_core_id = core_id_;
physical_ideal_core_id = cpu_core_id;
physical_affinity_mask.SetAffinityMask(p_affinity_mask);

if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
@@ -564,18 +531,18 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
}
} else {
// Otherwise, we edit the original affinity for restoration later.
original_physical_ideal_core_id = core_id_;
original_physical_ideal_core_id = cpu_core_id;
original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
}
}

// Update the pinned waiter list.
ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
{
bool retry_update{};
bool thread_is_pinned{};
do {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl{kernel};

// Don't do any further management if our termination has been requested.
R_SUCCEED_IF(IsTerminationRequested());
@@ -603,9 +570,12 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
ResultTerminationRequested);

// Note that the thread was pinned.
thread_is_pinned = true;

// Wait until the thread isn't pinned any more.
pinned_waiter_list.push_back(GetCurrentThread(kernel));
GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
GetCurrentThread(kernel).SetState(ThreadState::Waiting);
} else {
// If the thread isn't pinned, release the scheduler lock and retry until it's
// not current.
@@ -613,6 +583,16 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
}
}
} while (retry_update);

// If the thread was pinned, it no longer is, and we should remove the current thread from
// our waiter list.
if (thread_is_pinned) {
// Lock the scheduler.
KScopedSchedulerLock sl{kernel};

// Remove from the list.
pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
}
}

return ResultSuccess;

@@ -661,9 +641,15 @@ void KThread::WaitCancel() {
KScopedSchedulerLock sl{kernel};

// Check if we're waiting and cancellable.
if (this->GetState() == ThreadState::Waiting && cancellable) {
wait_cancelled = false;
wait_queue->CancelWait(this, ResultCancelled, true);
if (GetState() == ThreadState::Waiting && cancellable) {
if (sleeping_queue != nullptr) {
sleeping_queue->WakeupThread(this);
wait_cancelled = true;
} else {
SetSyncedObject(nullptr, ResultCancelled);
SetState(ThreadState::Runnable);
wait_cancelled = false;
}
} else {
// Otherwise, note that we cancelled a wait.
wait_cancelled = true;
@@ -714,59 +700,60 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Set the activity.
{
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl{kernel};

// Verify our state.
const auto cur_state = this->GetState();
const auto cur_state = GetState();
R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
ResultInvalidState);

// Either pause or resume.
if (activity == Svc::ThreadActivity::Paused) {
// Verify that we're not suspended.
R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

// Suspend.
this->RequestSuspend(SuspendType::Thread);
RequestSuspend(SuspendType::Thread);
} else {
ASSERT(activity == Svc::ThreadActivity::Runnable);

// Verify that we're suspended.
R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

// Resume.
this->Resume(SuspendType::Thread);
Resume(SuspendType::Thread);
}
}

// If the thread is now paused, update the pinned waiter list.
if (activity == Svc::ThreadActivity::Paused) {
ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
std::addressof(pinned_waiter_list));

bool thread_is_current;
bool thread_is_pinned{};
bool thread_is_current{};
do {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl{kernel};

// Don't do any further management if our termination has been requested.
R_SUCCEED_IF(this->IsTerminationRequested());

// By default, treat the thread as not current.
thread_is_current = false;
R_SUCCEED_IF(IsTerminationRequested());

// Check whether the thread is pinned.
if (this->GetStackParameters().is_pinned) {
if (GetStackParameters().is_pinned) {
// Verify that the current thread isn't terminating.
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
ResultTerminationRequested);

// Note that the thread was pinned and not current.
thread_is_pinned = true;
thread_is_current = false;

// Wait until the thread isn't pinned any more.
pinned_waiter_list.push_back(GetCurrentThread(kernel));
GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
GetCurrentThread(kernel).SetState(ThreadState::Waiting);
} else {
// Check if the thread is currently running.
// If it is, we'll need to retry.
thread_is_current = false;

for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
if (kernel.Scheduler(i).GetCurrentThread() == this) {
thread_is_current = true;
@@ -775,6 +762,16 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
}
}
} while (thread_is_current);

// If the thread was pinned, it no longer is, and we should remove the current thread from
// our waiter list.
if (thread_is_pinned) {
// Lock the scheduler.
KScopedSchedulerLock sl{kernel};

// Remove from the list.
pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
}
}

return ResultSuccess;

@@ -969,9 +966,6 @@ ResultCode KThread::Run() {

// Set our state and finish.
SetState(ThreadState::Runnable);

DisableDispatch();

return ResultSuccess;
}
}
@@ -1002,63 +996,29 @@ ResultCode KThread::Sleep(s64 timeout) {
ASSERT(this == GetCurrentThreadPointer(kernel));
ASSERT(timeout > 0);

ThreadQueueImplForKThreadSleep wait_queue_(kernel);
{
// Setup the scheduling lock and sleep.
KScopedSchedulerLockAndSleep slp(kernel, this, timeout);
KScopedSchedulerLockAndSleep slp{kernel, this, timeout};

// Check if the thread should terminate.
if (this->IsTerminationRequested()) {
if (IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
}

// Wait for the sleep to end.
this->BeginWait(std::addressof(wait_queue_));
// Mark the thread as waiting.
SetState(ThreadState::Waiting);
SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
}

// The lock/sleep is done.

// Cancel the timer.
kernel.TimeManager().UnscheduleTimeEvent(this);

return ResultSuccess;
}

void KThread::BeginWait(KThreadQueue* queue) {
// Set our state as waiting.
SetState(ThreadState::Waiting);

// Set our wait queue.
wait_queue = queue;
}

void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);

// If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) {
wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
}
}

void KThread::EndWait(ResultCode wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);

// If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) {
wait_queue->EndWait(this, wait_result_);
}
}

void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);

// If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) {
wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
}
}

void KThread::SetState(ThreadState state) {
KScopedSchedulerLock sl{kernel};

@@ -1090,26 +1050,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
return GetCurrentThread(kernel).GetCurrentCore();
}

KScopedDisableDispatch::~KScopedDisableDispatch() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}

// Skip the reschedule if single-core, as dispatch tracking is disabled here.
if (!Settings::values.use_multi_core.GetValue()) {
return;
}

if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
auto scheduler = kernel.CurrentScheduler();

if (scheduler) {
scheduler->RescheduleCurrentCore();
}
} else {
GetCurrentThread(kernel).EnableDispatch();
}
}

} // namespace Kernel

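The KScopedDisableDispatch destructor removed above is the RAII half of the dispatch counter: construction disables dispatch, and the outermost destruction either reschedules the core or simply decrements the count. The same shape with stand-in types (a sketch; the real guard also special-cases single-core mode and kernel shutdown):

    // Sketch of the scoped disable-dispatch guard these hunks delete.
    struct ThreadDispatchState {
        int disable_count = 0;
    };

    class ScopedDisableDispatchSketch {
    public:
        explicit ScopedDisableDispatchSketch(ThreadDispatchState& t) : thread{t} {
            ++thread.disable_count; // Block rescheduling for the guard's lifetime.
        }
        ~ScopedDisableDispatchSketch() {
            // On the outermost release the real guard reschedules the core;
            // here we only restore the count.
            --thread.disable_count;
        }

        ScopedDisableDispatchSketch(const ScopedDisableDispatchSketch&) = delete;
        ScopedDisableDispatchSketch& operator=(const ScopedDisableDispatchSketch&) = delete;

    private:
        ThreadDispatchState& thread;
    };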
@@ -48,7 +48,6 @@ enum class ThreadType : u32 {
Kernel = 1,
HighPriority = 2,
User = 3,
Dummy = 100, // Special thread type for emulation purposes only
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadType);

@@ -162,6 +161,8 @@ public:
}
}

void Wakeup();

void SetBasePriority(s32 value);

[[nodiscard]] ResultCode Run();
@@ -196,19 +197,13 @@ public:

void Suspend();

constexpr void SetSyncedIndex(s32 index) {
synced_index = index;
}

[[nodiscard]] constexpr s32 GetSyncedIndex() const {
return synced_index;
}

constexpr void SetWaitResult(ResultCode wait_res) {
void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
synced_object = obj;
wait_result = wait_res;
}

[[nodiscard]] constexpr ResultCode GetWaitResult() const {
[[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
*out = synced_object;
return wait_result;
}

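This hunk swaps the refactor's index-based result (SetSyncedIndex/GetSyncedIndex plus a plain SetWaitResult) back to the older pairing, where a waiter seeds the thread with (nullptr, ResultTimedOut) before sleeping and a signaler overwrites both fields. The shape of that protocol with stand-in types (a sketch; yuzu stores a KSynchronizationObject* and a ResultCode on the KThread):

    // Sketch of the restored synced-object/wait-result pairing.
    struct SyncObjectSketch {};

    struct WaitStateSketch {
        SyncObjectSketch* synced_object = nullptr;
        int wait_result = 0; // Stand-in for ResultCode.

        void SetSyncedObject(SyncObjectSketch* obj, int result) {
            synced_object = obj;
            wait_result = result;
        }

        int GetWaitResult(SyncObjectSketch** out) const {
            *out = synced_object; // Caller learns which object (if any) woke it.
            return wait_result;
        }
    };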
@@ -379,8 +374,6 @@ public:

[[nodiscard]] bool IsSignaled() const override;

void OnTimer();

static void PostDestroy(uintptr_t arg);

[[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
@@ -453,39 +446,20 @@ public:
return per_core_priority_queue_entry[core];
}

[[nodiscard]] bool IsKernelThread() const {
return GetActiveCore() == 3;
}

[[nodiscard]] bool IsDispatchTrackingDisabled() const {
return is_single_core || IsKernelThread();
void SetSleepingQueue(KThreadQueue* q) {
sleeping_queue = q;
}

[[nodiscard]] s32 GetDisableDispatchCount() const {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return 1;
}

return this->GetStackParameters().disable_count;
}

void DisableDispatch() {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return;
}

ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++;
}

void EnableDispatch() {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return;
}

ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--;
}
@@ -599,15 +573,6 @@ public:
address_key_value = val;
}

void ClearWaitQueue() {
wait_queue = nullptr;
}

void BeginWait(KThreadQueue* queue);
void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
void EndWait(ResultCode wait_result_);
void CancelWait(ResultCode wait_result_, bool cancel_timer_task);

[[nodiscard]] bool HasWaiters() const {
return !waiter_list.empty();
}

@@ -702,6 +667,7 @@ private:
KAffinityMask physical_affinity_mask{};
u64 thread_id{};
std::atomic<s64> cpu_time{};
KSynchronizationObject* synced_object{};
VAddr address_key{};
KProcess* parent{};
VAddr kernel_stack_top{};
@@ -711,14 +677,13 @@ private:
s64 schedule_count{};
s64 last_scheduled_tick{};
std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
KThreadQueue* wait_queue{};
KThreadQueue* sleeping_queue{};
WaiterList waiter_list{};
WaiterList pinned_waiter_list{};
KThread* lock_owner{};
u32 address_key_value{};
u32 suspend_request_flags{};
u32 suspend_allowed_flags{};
s32 synced_index{};
ResultCode wait_result{ResultSuccess};
s32 base_priority{};
s32 physical_ideal_core_id{};
@@ -743,7 +708,6 @@ private:

// For emulation
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};

// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -788,20 +752,4 @@ public:
}
};

class KScopedDisableDispatch {
public:
[[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}
GetCurrentThread(kernel).DisableDispatch();
}

~KScopedDisableDispatch();

private:
KernelCore& kernel;
};

} // namespace Kernel

@@ -1,49 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
[[maybe_unused]] KSynchronizationObject* signaled_object,
[[maybe_unused]] ResultCode wait_result) {}

void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);

// Set the thread as runnable.
waiting_thread->SetState(ThreadState::Runnable);

// Clear the thread's wait queue.
waiting_thread->ClearWaitQueue();

// Cancel the thread task.
kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
}

void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);

// Set the thread as runnable.
waiting_thread->SetState(ThreadState::Runnable);

// Clear the thread's wait queue.
waiting_thread->ClearWaitQueue();

// Cancel the thread task.
if (cancel_timer_task) {
kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
}
}

void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
[[maybe_unused]] ResultCode wait_result) {}

} // namespace Kernel

@@ -4,7 +4,6 @@

#pragma once

#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"

namespace Kernel {
@@ -12,24 +11,71 @@ namespace Kernel {
class KThreadQueue {
public:
explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
virtual ~KThreadQueue() = default;

virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
ResultCode wait_result);
virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
bool cancel_timer_task);
bool IsEmpty() const {
return wait_list.empty();
}

KThread::WaiterList::iterator begin() {
return wait_list.begin();
}
KThread::WaiterList::iterator end() {
return wait_list.end();
}

bool SleepThread(KThread* t) {
KScopedSchedulerLock sl{kernel};

// If the thread needs terminating, don't enqueue it.
if (t->IsTerminationRequested()) {
return false;
}

// Set the thread's queue and mark it as waiting.
t->SetSleepingQueue(this);
t->SetState(ThreadState::Waiting);

// Add the thread to the queue.
wait_list.push_back(*t);

return true;
}

void WakeupThread(KThread* t) {
KScopedSchedulerLock sl{kernel};

// Remove the thread from the queue.
wait_list.erase(wait_list.iterator_to(*t));

// Mark the thread as no longer sleeping.
t->SetState(ThreadState::Runnable);
t->SetSleepingQueue(nullptr);
}

KThread* WakeupFrontThread() {
KScopedSchedulerLock sl{kernel};

if (wait_list.empty()) {
return nullptr;
} else {
// Remove the thread from the queue.
auto it = wait_list.begin();
KThread* thread = std::addressof(*it);
wait_list.erase(it);

ASSERT(thread->GetState() == ThreadState::Waiting);

// Mark the thread as no longer sleeping.
thread->SetState(ThreadState::Runnable);
thread->SetSleepingQueue(nullptr);

return thread;
}
}

private:
KernelCore& kernel;
KThread::WaiterList wait_list{};
};

class KThreadQueueWithoutEndWait : public KThreadQueue {
public:
explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}

void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
};

} // namespace Kernel

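The restored KThreadQueue above pairs SleepThread with WakeupThread/WakeupFrontThread, all under the scheduler lock. A typical call shape, assuming a queue owned by some synchronization primitive (the caller below is hypothetical and not part of this diff; it only uses members the restored header declares):

    // Hypothetical caller of the restored KThreadQueue (illustration only).
    void WaitOn(Kernel::KThreadQueue& queue, Kernel::KThread* cur_thread) {
        // Enqueue and mark Waiting; returns false if the thread is terminating.
        if (!queue.SleepThread(cur_thread)) {
            return;
        }
        // Control returns here after WakeupThread()/WakeupFrontThread() marks
        // the thread Runnable and the scheduler switches back to it.
    }

    void SignalOne(Kernel::KThreadQueue& queue) {
        // Pop and wake the oldest waiter, if any.
        if (Kernel::KThread* thread = queue.WakeupFrontThread()) {
            (void)thread; // The woken thread resumes inside WaitOn().
        }
    }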
@@ -14,7 +14,6 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/thread.h"
#include "common/thread_worker.h"
#include "core/arm/arm_interface.h"
@@ -84,16 +83,12 @@ struct KernelCore::Impl {
    }

    void InitializeCores() {
        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            cores[core_id].Initialize(current_process->Is64BitProcess());
            system.Memory().SetCurrentPageTable(*current_process, core_id);
        for (auto& core : cores) {
            core.Initialize(current_process->Is64BitProcess());
        }
    }

    void Shutdown() {
        is_shutting_down.store(true, std::memory_order_relaxed);
        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });

        process_list.clear();

        // Close all open server ports.
@@ -128,6 +123,15 @@ struct KernelCore::Impl {
        next_user_process_id = KProcess::ProcessIDMin;
        next_thread_id = 1;

        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            if (suspend_threads[core_id]) {
                suspend_threads[core_id]->Close();
                suspend_threads[core_id] = nullptr;
            }

            schedulers[core_id].reset();
        }

        cores.clear();

        global_handle_table->Finalize();
@@ -155,16 +159,6 @@ struct KernelCore::Impl {
        CleanupObject(time_shared_mem);
        CleanupObject(system_resource_limit);

        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            if (suspend_threads[core_id]) {
                suspend_threads[core_id]->Close();
                suspend_threads[core_id] = nullptr;
            }

            schedulers[core_id]->Finalize();
            schedulers[core_id].reset();
        }

        // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
        next_host_thread_id = Core::Hardware::NUM_CPU_CORES;

@@ -251,11 +245,13 @@ struct KernelCore::Impl {
            KScopedSchedulerLock lock(kernel);
            global_scheduler_context->PreemptThreads();
        }
        const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
        const auto time_interval = std::chrono::nanoseconds{
            Core::Timing::msToCycles(std::chrono::milliseconds(10))};
        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
    });

    const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
    const auto time_interval =
        std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
    system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
}
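The two `time_interval` variants above differ in units: the old line schedules the preemption event 10 wall-clock milliseconds out, while the replacement first converts 10 ms into emulated CPU cycles and then carries that count in a `nanoseconds` value. A hedged sketch of that conversion, using an assumed clock rate rather than yuzu's actual `Core::Timing` constant:

```cpp
#include <chrono>
#include <cstdint>

// Hypothetical fixed clock rate; yuzu's Core::Timing defines its own.
constexpr std::int64_t kAssumedClockRateHz = 1'020'000'000;

// Sketch of an msToCycles-style helper: milliseconds of guest time to cycles.
constexpr std::int64_t MsToCycles(std::chrono::milliseconds ms) {
    return ms.count() * (kAssumedClockRateHz / 1'000);
}

static_assert(MsToCycles(std::chrono::milliseconds(10)) == 10'200'000);
```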
@@ -271,6 +267,14 @@ struct KernelCore::Impl {

    void MakeCurrentProcess(KProcess* process) {
        current_process = process;
        if (process == nullptr) {
            return;
        }

        const u32 core_id = GetCurrentHostThreadID();
        if (core_id < Core::Hardware::NUM_CPU_CORES) {
            system.Memory().SetCurrentPageTable(*process, core_id);
        }
    }

    static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -340,16 +344,7 @@ struct KernelCore::Impl {
        is_phantom_mode_for_singlecore = value;
    }

    bool IsShuttingDown() const {
        return is_shutting_down.load(std::memory_order_relaxed);
    }

    KThread* GetCurrentEmuThread() {
        // If we are shutting down the kernel, none of this is relevant anymore.
        if (IsShuttingDown()) {
            return {};
        }

        const auto thread_id = GetCurrentHostThreadID();
        if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
            return GetHostDummyThread();
@@ -765,7 +760,6 @@ struct KernelCore::Impl {
    std::vector<std::unique_ptr<KThread>> dummy_threads;

    bool is_multicore{};
    std::atomic_bool is_shutting_down{};
    bool is_phantom_mode_for_singlecore{};
    u32 single_core_thread_id{};

@@ -851,20 +845,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
    return impl->cores[id];
}

size_t KernelCore::CurrentPhysicalCoreIndex() const {
    const u32 core_id = impl->GetCurrentHostThreadID();
    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
        return Core::Hardware::NUM_CPU_CORES - 1;
    }
    return core_id;
}

Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
    return impl->cores[CurrentPhysicalCoreIndex()];
    u32 core_id = impl->GetCurrentHostThreadID();
    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
    return impl->cores[core_id];
}

const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
    return impl->cores[CurrentPhysicalCoreIndex()];
    u32 core_id = impl->GetCurrentHostThreadID();
    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
    return impl->cores[core_id];
}

Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1067,9 +1057,6 @@ void KernelCore::Suspend(bool in_suspention) {
            impl->suspend_threads[core_id]->SetState(state);
            impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                ThreadWaitReasonForDebugging::Suspended);
            if (!should_suspend) {
                impl->suspend_threads[core_id]->DisableDispatch();
            }
        }
    }
}
@@ -1078,21 +1065,19 @@ bool KernelCore::IsMulticore() const {
    return impl->is_multicore;
}

bool KernelCore::IsShuttingDown() const {
    return impl->IsShuttingDown();
}

void KernelCore::ExceptionalExit() {
    exception_exited = true;
    Suspend(true);
}

void KernelCore::EnterSVCProfile() {
    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
    std::size_t core = impl->GetCurrentHostThreadID();
    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
}

void KernelCore::ExitSVCProfile() {
    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
    std::size_t core = impl->GetCurrentHostThreadID();
    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
}

std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
@@ -53,7 +53,6 @@ class KSharedMemoryInfo;
class KThread;
class KTransferMemory;
class KWritableEvent;
class KCodeMemory;
class PhysicalCore;
class ServiceThread;
class Synchronization;
@@ -149,9 +148,6 @@ public:
    /// Gets the an instance of the respective physical CPU core.
    const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;

    /// Gets the current physical core index for the running host thread.
    std::size_t CurrentPhysicalCoreIndex() const;

    /// Gets the sole instance of the Scheduler at the current running core.
    Kernel::KScheduler* CurrentScheduler();

@@ -275,8 +271,6 @@ public:

    bool IsMulticore() const;

    bool IsShuttingDown() const;

    void EnterSVCProfile();

    void ExitSVCProfile();
@@ -332,8 +326,6 @@ public:
            return slab_heap_container->transfer_memory;
        } else if constexpr (std::is_same_v<T, KWritableEvent>) {
            return slab_heap_container->writeable_event;
        } else if constexpr (std::is_same_v<T, KCodeMemory>) {
            return slab_heap_container->code_memory;
        }
    }

@@ -385,7 +377,6 @@ private:
    KSlabHeap<KThread> thread;
    KSlabHeap<KTransferMemory> transfer_memory;
    KSlabHeap<KWritableEvent> writeable_event;
    KSlabHeap<KCodeMemory> code_memory;
};

std::unique_ptr<SlabHeapContainer> slab_heap_container;
@@ -25,27 +25,24 @@ public:
    void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);

private:
    std::vector<std::jthread> threads;
    std::vector<std::thread> threads;
    std::queue<std::function<void()>> requests;
    std::mutex queue_mutex;
    std::condition_variable_any condition;
    std::condition_variable condition;
    const std::string service_name;
    bool stop{};
};

ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
    : service_name{name} {
    for (std::size_t i = 0; i < num_threads; ++i) {
        threads.emplace_back([this, &kernel](std::stop_token stop_token) {
    for (std::size_t i = 0; i < num_threads; ++i)
        threads.emplace_back([this, &kernel] {
            Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());

            // Wait for first request before trying to acquire a render context
            {
                std::unique_lock lock{queue_mutex};
                condition.wait(lock, stop_token, [this] { return !requests.empty(); });
            }

            if (stop_token.stop_requested()) {
                return;
                condition.wait(lock, [this] { return stop || !requests.empty(); });
            }

            kernel.RegisterHostThread();
@@ -55,16 +52,10 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std

                {
                    std::unique_lock lock{queue_mutex};
                    condition.wait(lock, stop_token, [this] { return !requests.empty(); });

                    if (stop_token.stop_requested()) {
                    condition.wait(lock, [this] { return stop || !requests.empty(); });
                    if (stop || requests.empty()) {
                        return;
                    }

                    if (requests.empty()) {
                        continue;
                    }

                    task = std::move(requests.front());
                    requests.pop();
                }
@@ -72,7 +63,6 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
                task();
            }
        });
    }
}

void ServiceThread::Impl::QueueSyncRequest(KSession& session,
@@ -98,9 +88,12 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session,
}

ServiceThread::Impl::~Impl() {
    {
        std::unique_lock lock{queue_mutex};
        stop = true;
    }
    condition.notify_all();
    for (auto& thread : threads) {
        thread.request_stop();
    for (std::thread& thread : threads) {
        thread.join();
    }
}
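The destructor change above trades `std::jthread`'s built-in stop token for an explicit stop flag guarded by the queue mutex: set the flag under the lock, notify everyone, then join. A minimal self-contained sketch of that stop-flag worker pattern (illustrative names, not yuzu's class):

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Sketch of the stop-flag worker pattern the change above returns to.
class WorkerPool {
public:
    explicit WorkerPool(std::size_t num_threads) {
        for (std::size_t i = 0; i < num_threads; ++i) {
            threads.emplace_back([this] {
                while (true) {
                    std::function<void()> task;
                    {
                        std::unique_lock lock{queue_mutex};
                        condition.wait(lock, [this] { return stop || !requests.empty(); });
                        if (stop && requests.empty()) {
                            return; // Drained and asked to stop.
                        }
                        if (requests.empty()) {
                            continue; // Spurious wakeup; lock released by RAII.
                        }
                        task = std::move(requests.front());
                        requests.pop();
                    }
                    task(); // Run outside the lock.
                }
            });
        }
    }

    ~WorkerPool() {
        {
            std::unique_lock lock{queue_mutex};
            stop = true;
        }
        condition.notify_all(); // Wake every worker so it can observe `stop`.
        for (std::thread& thread : threads) {
            thread.join();
        }
    }

    void Push(std::function<void()> task) {
        {
            std::unique_lock lock{queue_mutex};
            requests.push(std::move(task));
        }
        condition.notify_one();
    }

private:
    std::vector<std::thread> threads;
    std::queue<std::function<void()>> requests;
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop{};
};
```

The trade-off is explicit wake-and-join code in the destructor, in exchange for not depending on `std::condition_variable_any` and `std::stop_token`, which had uneven standard-library support at the time.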
@@ -18,7 +18,6 @@
#include "core/core_timing.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_block.h"
@@ -32,7 +31,6 @@
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
@@ -309,29 +307,26 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,

/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {

    auto& kernel = system.Kernel();

    // Create the wait queue.
    KThreadQueue wait_queue(kernel);

    // Get the client session from its handle.
    KScopedAutoObject session =
        kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
    R_UNLESS(session.IsNotNull(), ResultInvalidHandle);

    LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());

    auto thread = kernel.CurrentScheduler()->GetCurrentThread();
    {
        KScopedSchedulerLock lock(kernel);
        thread->SetState(ThreadState::Waiting);
        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);

        // This is a synchronous request, so we should wait for our request to complete.
        GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
        GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
        session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
        {
            KScopedAutoObject session =
                kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
            R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
            LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
            session->SendSyncRequest(thread, system.Memory(), system.CoreTiming());
        }
    }

    return thread->GetWaitResult();
    KSynchronizationObject* dummy{};
    return thread->GetWaitResult(std::addressof(dummy));
}

static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -878,7 +873,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
            const u64 thread_ticks = current_thread->GetCpuTime();

            out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
            out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
        }

@@ -892,8 +887,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
            return ResultInvalidHandle;
        }

        if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
            info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
        if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id != system.CurrentCoreIndex()) {
            LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
            return ResultInvalidCombination;
        }
@@ -1203,22 +1197,6 @@ constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
    }
}

constexpr bool IsValidMapCodeMemoryPermission(Svc::MemoryPermission perm) {
    return perm == Svc::MemoryPermission::ReadWrite;
}

constexpr bool IsValidMapToOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
    return perm == Svc::MemoryPermission::Read || perm == Svc::MemoryPermission::ReadExecute;
}

constexpr bool IsValidUnmapCodeMemoryPermission(Svc::MemoryPermission perm) {
    return perm == Svc::MemoryPermission::None;
}

constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
    return perm == Svc::MemoryPermission::None;
}

} // Anonymous namespace

static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
@@ -1328,195 +1306,6 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces
    return page_table.SetProcessMemoryPermission(address, size, ConvertToKMemoryPermission(perm));
}

static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
                                   VAddr src_address, u64 size) {
    LOG_TRACE(Kernel_SVC,
              "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
              dst_address, process_handle, src_address, size);

    // Validate the address/size.
    R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
    R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
    R_UNLESS(size > 0, ResultInvalidSize);
    R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
    R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);

    // Get the processes.
    KProcess* dst_process = system.CurrentProcess();
    KScopedAutoObject src_process =
        dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
    R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);

    // Get the page tables.
    auto& dst_pt = dst_process->PageTable();
    auto& src_pt = src_process->PageTable();

    // Validate that the mapping is in range.
    R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
    R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
             ResultInvalidMemoryRegion);

    // Create a new page group.
    KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
    KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());

    // Map the group.
    R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
                          KMemoryPermission::UserReadWrite));

    return ResultSuccess;
}

static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
                                     VAddr src_address, u64 size) {
    LOG_TRACE(Kernel_SVC,
              "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
              dst_address, process_handle, src_address, size);

    // Validate the address/size.
    R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
    R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
    R_UNLESS(size > 0, ResultInvalidSize);
    R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
    R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);

    // Get the processes.
    KProcess* dst_process = system.CurrentProcess();
    KScopedAutoObject src_process =
        dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
    R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);

    // Get the page tables.
    auto& dst_pt = dst_process->PageTable();
    auto& src_pt = src_process->PageTable();

    // Validate that the mapping is in range.
    R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
    R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
             ResultInvalidMemoryRegion);

    // Unmap the memory.
    R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));

    return ResultSuccess;
}

static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
    LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}",
              static_cast<void*>(out), address, size);
    // Get kernel instance.
    auto& kernel = system.Kernel();

    // Validate address / size.
    R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
    R_UNLESS(size > 0, ResultInvalidSize);
    R_UNLESS((address < address + size), ResultInvalidCurrentMemory);

    // Create the code memory.

    KCodeMemory* code_mem = KCodeMemory::Create(kernel);
    R_UNLESS(code_mem != nullptr, ResultOutOfResource);

    // Verify that the region is in range.
    R_UNLESS(system.CurrentProcess()->PageTable().Contains(address, size),
             ResultInvalidCurrentMemory);

    // Initialize the code memory.
    R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size));

    // Register the code memory.
    KCodeMemory::Register(kernel, code_mem);

    // Add the code memory to the handle table.
    R_TRY(system.CurrentProcess()->GetHandleTable().Add(out, code_mem));

    code_mem->Close();

    return ResultSuccess;
}

static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
                                    VAddr address, size_t size, Svc::MemoryPermission perm) {

    LOG_TRACE(Kernel_SVC,
              "called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
              "permission=0x{:X}",
              code_memory_handle, operation, address, size, perm);

    // Validate the address / size.
    R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
    R_UNLESS(size > 0, ResultInvalidSize);
    R_UNLESS((address < address + size), ResultInvalidCurrentMemory);

    // Get the code memory from its handle.
    KScopedAutoObject code_mem =
        system.CurrentProcess()->GetHandleTable().GetObject<KCodeMemory>(code_memory_handle);
    R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle);

    // NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process.
    // This enables homebrew usage of these SVCs for JIT.

    // Perform the operation.
    switch (static_cast<CodeMemoryOperation>(operation)) {
    case CodeMemoryOperation::Map: {
        // Check that the region is in range.
        R_UNLESS(
            system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
            ResultInvalidMemoryRegion);

        // Check the memory permission.
        R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);

        // Map the memory.
        R_TRY(code_mem->Map(address, size));
    } break;
    case CodeMemoryOperation::Unmap: {
        // Check that the region is in range.
        R_UNLESS(
            system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
            ResultInvalidMemoryRegion);

        // Check the memory permission.
        R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);

        // Unmap the memory.
        R_TRY(code_mem->Unmap(address, size));
    } break;
    case CodeMemoryOperation::MapToOwner: {
        // Check that the region is in range.
        R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
                                                              KMemoryState::GeneratedCode),
                 ResultInvalidMemoryRegion);

        // Check the memory permission.
        R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);

        // Map the memory to its owner.
        R_TRY(code_mem->MapToOwner(address, size, perm));
    } break;
    case CodeMemoryOperation::UnmapFromOwner: {
        // Check that the region is in range.
        R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
                                                              KMemoryState::GeneratedCode),
                 ResultInvalidMemoryRegion);

        // Check the memory permission.
        R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);

        // Unmap the memory from its owner.
        R_TRY(code_mem->UnmapFromOwner(address, size));
    } break;
    default:
        return ResultInvalidEnumValue;
    }

    return ResultSuccess;
}

static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
                                     VAddr page_info_address, Handle process_handle,
                                     VAddr address) {
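Every handler above opens with the same validation prologue: page alignment, a non-zero size, and an overflow check, all before any page-table work. A standalone sketch of that idiom (the names and page-size constant are illustrative, not yuzu's definitions):

```cpp
#include <cstdint>

constexpr std::uint64_t kPageSize = 0x1000; // 4 KiB pages, as on the Switch.

constexpr bool IsAligned(std::uint64_t value, std::uint64_t alignment) {
    return (value & (alignment - 1)) == 0;
}

// Mirrors the R_UNLESS prologue above: reject unaligned, empty, or
// address-space-wrapping ranges before touching any page tables.
constexpr bool IsValidRegion(std::uint64_t address, std::uint64_t size) {
    return IsAligned(address, kPageSize) && IsAligned(size, kPageSize) &&
           size > 0 && address < address + size; // unsigned wrap check
}

static_assert(IsValidRegion(0x1000, 0x2000));
static_assert(!IsValidRegion(0x1001, 0x2000)); // unaligned address
static_assert(!IsValidRegion(0x1000, 0));      // empty range
```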
@@ -2811,8 +2600,8 @@ static const FunctionDef SVC_Table_64[] = {
    {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
    {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
    {0x4A, nullptr, "SetUnsafeLimit"},
    {0x4B, SvcWrap64<CreateCodeMemory>, "CreateCodeMemory"},
    {0x4C, SvcWrap64<ControlCodeMemory>, "ControlCodeMemory"},
    {0x4B, nullptr, "CreateCodeMemory"},
    {0x4C, nullptr, "ControlCodeMemory"},
    {0x4D, nullptr, "SleepSystem"},
    {0x4E, nullptr, "ReadWriteRegister"},
    {0x4F, nullptr, "SetProcessActivity"},
@@ -2852,8 +2641,8 @@ static const FunctionDef SVC_Table_64[] = {
    {0x71, nullptr, "ManageNamedPort"},
    {0x72, nullptr, "ConnectToPort"},
    {0x73, SvcWrap64<SetProcessMemoryPermission>, "SetProcessMemoryPermission"},
    {0x74, SvcWrap64<MapProcessMemory>, "MapProcessMemory"},
    {0x75, SvcWrap64<UnmapProcessMemory>, "UnmapProcessMemory"},
    {0x74, nullptr, "MapProcessMemory"},
    {0x75, nullptr, "UnmapProcessMemory"},
    {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"},
    {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"},
    {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
@@ -73,23 +73,6 @@ void SvcWrap64(Core::System& system) {
                           .raw);
}

// Used by MapProcessMemory and UnmapProcessMemory
template <ResultCode func(Core::System&, u64, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
                            Param(system, 2), Param(system, 3))
                           .raw);
}

// Used by ControlCodeMemory
template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
                            static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
                            static_cast<Svc::MemoryPermission>(Param(system, 4)))
                           .raw);
}

template <ResultCode func(Core::System&, u32*)>
void SvcWrap64(Core::System& system) {
    u32 param = 0;
@@ -318,16 +301,6 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

// Used by CreateCodeMemory
template <ResultCode func(Core::System&, Handle*, u64, u64)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
    const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;

    system.CurrentArmInterface().SetReg(1, param_1);
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, Handle*, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
    u32 param_1 = 0;
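Each SvcWrap64 overload above does the same mechanical job: pull guest registers, cast them to the SVC's typed signature, call through the template parameter, and write the raw ResultCode back. A hedged sketch of that register-marshalling idea with hypothetical types:

```cpp
#include <cstdint>

// Hypothetical guest-register file, standing in for Core::System's ARM state.
struct GuestRegs {
    std::uint64_t x[8]{};
};

using ResultRaw = std::uint32_t;

// One template per SVC signature: marshal registers into typed arguments,
// then write the raw result back into X0 (the FuncReturn equivalent).
template <ResultRaw func(GuestRegs&, std::uint64_t, std::uint32_t)>
void Wrap(GuestRegs& regs) {
    const ResultRaw result =
        func(regs, regs.x[0], static_cast<std::uint32_t>(regs.x[1]));
    regs.x[0] = result;
}

// Example guest-side handler with that signature.
ResultRaw SampleSvc(GuestRegs&, std::uint64_t addr, std::uint32_t flags) {
    return (addr != 0 && flags != 0) ? 0u : 0xE401u; // hypothetical codes
}

void Dispatch(GuestRegs& regs) {
    Wrap<SampleSvc>(regs); // Marshals x0/x1 in, result back into x0.
}
```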
@@ -5,7 +5,6 @@
#include "common/assert.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/time_manager.h"

@@ -16,10 +15,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
        Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
                                  [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
                                      KThread* thread = reinterpret_cast<KThread*>(thread_handle);
                                      {
                                          KScopedSchedulerLock sl(system.Kernel());
                                          thread->OnTimer();
                                      }
                                      thread->Wakeup();
                                  });
}
@@ -8,7 +8,6 @@
#include "core/hle/service/glue/bgtc.h"
#include "core/hle/service/glue/ectx.h"
#include "core/hle/service/glue/glue.h"
#include "core/hle/service/glue/notif.h"

namespace Service::Glue {

@@ -25,9 +24,6 @@ void InstallInterfaces(Core::System& system) {

    // Error Context
    std::make_shared<ECTX_AW>(system)->InstallAsService(system.ServiceManager());

    // Notification Services for application
    std::make_shared<NOTIF_A>(system)->InstallAsService(system.ServiceManager());
}

} // namespace Service::Glue
@@ -1,44 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/ipc_helpers.h"
#include "core/hle/service/glue/notif.h"

namespace Service::Glue {

NOTIF_A::NOTIF_A(Core::System& system_) : ServiceFramework{system_, "notif:a"} {
    // clang-format off
    static const FunctionInfo functions[] = {
        {500, nullptr, "RegisterAlarmSetting"},
        {510, nullptr, "UpdateAlarmSetting"},
        {520, &NOTIF_A::ListAlarmSettings, "ListAlarmSettings"},
        {530, nullptr, "LoadApplicationParameter"},
        {540, nullptr, "DeleteAlarmSetting"},
        {1000, &NOTIF_A::Initialize, "Initialize"},
    };
    // clang-format on

    RegisterHandlers(functions);
}

NOTIF_A::~NOTIF_A() = default;

void NOTIF_A::ListAlarmSettings(Kernel::HLERequestContext& ctx) {
    // Returns an array of AlarmSetting
    constexpr s32 alarm_count = 0;

    LOG_WARNING(Service_NOTIF, "(STUBBED) called");

    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(ResultSuccess);
    rb.Push(alarm_count);
}

void NOTIF_A::Initialize(Kernel::HLERequestContext& ctx) {
    LOG_WARNING(Service_NOTIF, "(STUBBED) called");
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
}

} // namespace Service::Glue
@@ -1,25 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/service/service.h"

namespace Core {
class System;
}

namespace Service::Glue {

class NOTIF_A final : public ServiceFramework<NOTIF_A> {
public:
    explicit NOTIF_A(Core::System& system_);
    ~NOTIF_A() override;

private:
    void ListAlarmSettings(Kernel::HLERequestContext& ctx);
    void Initialize(Kernel::HLERequestContext& ctx);
};

} // namespace Service::Glue
@@ -110,7 +110,7 @@ void Controller_NPad::ControllerUpdate(Core::HID::ControllerTriggerType type,
        UpdateControllerAt(npad_type, npad_id, is_connected);
        break;
    case Core::HID::ControllerTriggerType::Battery: {
        if (!controller.device->IsConnected()) {
        if (!controller.is_connected) {
            return;
        }
        auto& shared_memory = controller.shared_memory_entry;
@@ -150,6 +150,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
        shared_memory.system_properties.is_vertical.Assign(1);
        shared_memory.system_properties.use_plus.Assign(1);
        shared_memory.system_properties.use_minus.Assign(1);
        shared_memory.assignment_mode = NpadJoyAssignmentMode::Single;
        shared_memory.applet_footer.type = AppletFooterUiType::SwitchProController;
        break;
    case Core::HID::NpadStyleIndex::Handheld:
@@ -165,30 +166,21 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
        break;
    case Core::HID::NpadStyleIndex::JoyconDual:
        shared_memory.style_tag.joycon_dual.Assign(1);
        if (controller.is_dual_left_connected) {
            shared_memory.device_type.joycon_left.Assign(1);
            shared_memory.system_properties.use_minus.Assign(1);
        }
        if (controller.is_dual_right_connected) {
            shared_memory.device_type.joycon_right.Assign(1);
            shared_memory.system_properties.use_plus.Assign(1);
        }
        shared_memory.system_properties.use_directional_buttons.Assign(1);
        shared_memory.device_type.joycon_left.Assign(1);
        shared_memory.device_type.joycon_right.Assign(1);
        shared_memory.system_properties.is_vertical.Assign(1);
        shared_memory.system_properties.use_plus.Assign(1);
        shared_memory.system_properties.use_minus.Assign(1);
        shared_memory.system_properties.use_directional_buttons.Assign(1);
        shared_memory.assignment_mode = NpadJoyAssignmentMode::Dual;
        if (controller.is_dual_left_connected && controller.is_dual_right_connected) {
            shared_memory.applet_footer.type = AppletFooterUiType::JoyDual;
        } else if (controller.is_dual_left_connected) {
            shared_memory.applet_footer.type = AppletFooterUiType::JoyDualLeftOnly;
        } else {
            shared_memory.applet_footer.type = AppletFooterUiType::JoyDualRightOnly;
        }
        shared_memory.applet_footer.type = AppletFooterUiType::JoyDual;
        break;
    case Core::HID::NpadStyleIndex::JoyconLeft:
        shared_memory.style_tag.joycon_left.Assign(1);
        shared_memory.device_type.joycon_left.Assign(1);
        shared_memory.system_properties.is_horizontal.Assign(1);
        shared_memory.system_properties.use_minus.Assign(1);
        shared_memory.assignment_mode = NpadJoyAssignmentMode::Single;
        shared_memory.applet_footer.type = AppletFooterUiType::JoyLeftHorizontal;
        break;
    case Core::HID::NpadStyleIndex::JoyconRight:
@@ -196,6 +188,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
        shared_memory.device_type.joycon_right.Assign(1);
        shared_memory.system_properties.is_horizontal.Assign(1);
        shared_memory.system_properties.use_plus.Assign(1);
        shared_memory.assignment_mode = NpadJoyAssignmentMode::Single;
        shared_memory.applet_footer.type = AppletFooterUiType::JoyRightHorizontal;
        break;
    case Core::HID::NpadStyleIndex::GameCube:
@@ -207,6 +200,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
    case Core::HID::NpadStyleIndex::Pokeball:
        shared_memory.style_tag.palma.Assign(1);
        shared_memory.device_type.palma.Assign(1);
        shared_memory.assignment_mode = NpadJoyAssignmentMode::Single;
        break;
    case Core::HID::NpadStyleIndex::NES:
        shared_memory.style_tag.lark.Assign(1);
@@ -449,15 +443,11 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8*
        case Core::HID::NpadStyleIndex::JoyconDual:
            pad_state.connection_status.raw = 0;
            pad_state.connection_status.is_connected.Assign(1);
            if (controller.is_dual_left_connected) {
                pad_state.connection_status.is_left_connected.Assign(1);
                libnx_state.connection_status.is_left_connected.Assign(1);
            }
            if (controller.is_dual_right_connected) {
                pad_state.connection_status.is_right_connected.Assign(1);
                libnx_state.connection_status.is_right_connected.Assign(1);
            }
            pad_state.connection_status.is_left_connected.Assign(1);
            pad_state.connection_status.is_right_connected.Assign(1);

            libnx_state.connection_status.is_left_connected.Assign(1);
            libnx_state.connection_status.is_right_connected.Assign(1);
            pad_state.sampling_number =
                npad.joy_dual_lifo.ReadCurrentEntry().state.sampling_number + 1;
            npad.joy_dual_lifo.WriteNextEntry(pad_state);
@@ -697,7 +687,7 @@ Controller_NPad::NpadCommunicationMode Controller_NPad::GetNpadCommunicationMode
    return communication_mode;
}

void Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id, NpadJoyDeviceType npad_device_type,
void Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id,
                                  NpadJoyAssignmentMode assignment_mode) {
    if (!IsNpadIdValid(npad_id)) {
        LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id);
@@ -708,62 +698,6 @@ void Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id, NpadJoyDeviceTy
    if (controller.shared_memory_entry.assignment_mode != assignment_mode) {
        controller.shared_memory_entry.assignment_mode = assignment_mode;
    }

    if (!controller.device->IsConnected()) {
        return;
    }

    if (assignment_mode == NpadJoyAssignmentMode::Dual) {
        if (controller.device->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconLeft) {
            DisconnectNpad(npad_id);
            controller.is_dual_left_connected = true;
            controller.is_dual_right_connected = false;
            UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id, true);
            return;
        }
        if (controller.device->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconRight) {
            DisconnectNpad(npad_id);
            controller.is_dual_left_connected = false;
            controller.is_dual_right_connected = true;
            UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id, true);
            return;
        }
        return;
    }

    // This is for NpadJoyAssignmentMode::Single

    // Only JoyconDual get affected by this function
    if (controller.device->GetNpadStyleIndex() != Core::HID::NpadStyleIndex::JoyconDual) {
        return;
    }

    if (controller.is_dual_left_connected && !controller.is_dual_right_connected) {
        DisconnectNpad(npad_id);
        UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconLeft, npad_id, true);
        return;
    }
    if (!controller.is_dual_left_connected && controller.is_dual_right_connected) {
        DisconnectNpad(npad_id);
        UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconRight, npad_id, true);
        return;
    }

    // We have two controllers connected to the same npad_id we need to split them
    const auto npad_id_2 = hid_core.GetFirstDisconnectedNpadId();
    auto& controller_2 = GetControllerFromNpadIdType(npad_id_2);
    DisconnectNpad(npad_id);
    if (npad_device_type == NpadJoyDeviceType::Left) {
        UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconLeft, npad_id, true);
        controller_2.is_dual_left_connected = false;
        controller_2.is_dual_right_connected = true;
        UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id_2, true);
    } else {
        UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconRight, npad_id, true);
        controller_2.is_dual_left_connected = true;
        controller_2.is_dual_right_connected = false;
        UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id_2, true);
    }
}
bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id,
@@ -973,7 +907,6 @@ void Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) {
    }

    auto& shared_memory_entry = controller.shared_memory_entry;
    // Don't reset shared_memory_entry.assignment_mode this value is persistent
    shared_memory_entry.style_tag.raw = Core::HID::NpadStyleSet::None; // Zero out
    shared_memory_entry.device_type.raw = 0;
    shared_memory_entry.system_properties.raw = 0;
@@ -990,10 +923,9 @@ void Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) {
        .left = {},
        .right = {},
    };
    shared_memory_entry.assignment_mode = NpadJoyAssignmentMode::Dual;
    shared_memory_entry.applet_footer.type = AppletFooterUiType::None;

    controller.is_dual_left_connected = true;
    controller.is_dual_right_connected = true;
    controller.is_connected = false;
    controller.device->Disconnect();
    SignalStyleSetChangedEvent(npad_id);
@@ -1090,70 +1022,19 @@ void Controller_NPad::MergeSingleJoyAsDualJoy(Core::HID::NpadIdType npad_id_1,
                  npad_id_2);
        return;
    }
    auto& controller_1 = GetControllerFromNpadIdType(npad_id_1);
    auto& controller_2 = GetControllerFromNpadIdType(npad_id_2);
    const auto controller_style_1 = controller_1.device->GetNpadStyleIndex();
    const auto controller_style_2 = controller_2.device->GetNpadStyleIndex();
    bool merge_controllers = false;
    auto& controller_1 = GetControllerFromNpadIdType(npad_id_1).device;
    auto& controller_2 = GetControllerFromNpadIdType(npad_id_2).device;

    // If the controllers at both npad indices form a pair of left and right joycons, merge them.
    // Otherwise, do nothing.
    if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconLeft &&
        controller_style_2 == Core::HID::NpadStyleIndex::JoyconRight) {
        merge_controllers = true;
    }
    if (controller_style_2 == Core::HID::NpadStyleIndex::JoyconLeft &&
        controller_style_1 == Core::HID::NpadStyleIndex::JoyconRight) {
        merge_controllers = true;
    }
    if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconDual &&
        controller_style_2 == Core::HID::NpadStyleIndex::JoyconRight &&
        controller_1.is_dual_left_connected && !controller_1.is_dual_right_connected) {
        merge_controllers = true;
    }
    if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconDual &&
        controller_style_2 == Core::HID::NpadStyleIndex::JoyconLeft &&
        !controller_1.is_dual_left_connected && controller_1.is_dual_right_connected) {
        merge_controllers = true;
    }
    if (controller_style_2 == Core::HID::NpadStyleIndex::JoyconDual &&
        controller_style_1 == Core::HID::NpadStyleIndex::JoyconRight &&
        controller_2.is_dual_left_connected && !controller_2.is_dual_right_connected) {
        merge_controllers = true;
    }
    if (controller_style_2 == Core::HID::NpadStyleIndex::JoyconDual &&
        controller_style_1 == Core::HID::NpadStyleIndex::JoyconLeft &&
        !controller_2.is_dual_left_connected && controller_2.is_dual_right_connected) {
        merge_controllers = true;
    }
    if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconDual &&
        controller_style_2 == Core::HID::NpadStyleIndex::JoyconDual &&
        controller_1.is_dual_left_connected && !controller_1.is_dual_right_connected &&
        !controller_2.is_dual_left_connected && controller_2.is_dual_right_connected) {
        merge_controllers = true;
    }
    if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconDual &&
        controller_style_2 == Core::HID::NpadStyleIndex::JoyconDual &&
        !controller_1.is_dual_left_connected && controller_1.is_dual_right_connected &&
        controller_2.is_dual_left_connected && !controller_2.is_dual_right_connected) {
        merge_controllers = true;
    }

    if (merge_controllers) {
    if ((controller_1->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconLeft &&
         controller_2->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconRight) ||
        (controller_2->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconLeft &&
         controller_1->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconRight)) {
        // Disconnect the joycon at the second id and connect the dual joycon at the first index.
        DisconnectNpad(npad_id_2);
        controller_1.is_dual_left_connected = true;
        controller_1.is_dual_right_connected = true;
        AddNewControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id_1);
        return;
    }
    LOG_WARNING(Service_HID,
                "Controllers can't be merged npad_id_1:{}, npad_id_2:{}, type_1:{}, type_2:{}, "
                "dual_1(left/right):{}/{}, dual_2(left/right):{}/{}",
                npad_id_1, npad_id_2, controller_1.device->GetNpadStyleIndex(),
                controller_2.device->GetNpadStyleIndex(), controller_1.is_dual_left_connected,
                controller_1.is_dual_right_connected, controller_2.is_dual_left_connected,
                controller_2.is_dual_right_connected);
}
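All of the merge_controllers conditions above reduce to one question: between the two npads, is there exactly one connected left half and exactly one connected right half? A compact sketch of that predicate:

```cpp
// Sketch: each npad contributes 0 or 1 left halves and 0 or 1 right halves;
// a merge is valid exactly when the pair sums to one of each.
struct Halves {
    int left{};
    int right{};
};

constexpr bool CanMerge(Halves a, Halves b) {
    return a.left + b.left == 1 && a.right + b.right == 1;
}

static_assert(CanMerge({1, 0}, {0, 1}));  // left joycon + right joycon
static_assert(CanMerge({0, 1}, {1, 0}));  // right + left
static_assert(!CanMerge({1, 1}, {0, 1})); // full dual pair + extra right
```

A single left Joy-Con contributes {1, 0}, a right one {0, 1}, and a dual npad contributes whichever halves are still attached, so every case the long condition chain accepts sums to one left and one right.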
void Controller_NPad::StartLRAssignmentMode() {

@@ -113,8 +113,7 @@ public:
    void SetNpadCommunicationMode(NpadCommunicationMode communication_mode_);
    NpadCommunicationMode GetNpadCommunicationMode() const;

    void SetNpadMode(Core::HID::NpadIdType npad_id, NpadJoyDeviceType npad_device_type,
                     NpadJoyAssignmentMode assignment_mode);
    void SetNpadMode(Core::HID::NpadIdType npad_id, NpadJoyAssignmentMode assignment_mode);

    bool VibrateControllerAtIndex(Core::HID::NpadIdType npad_id, std::size_t device_index,
                                  const Core::HID::VibrationValue& vibration_value);
@@ -465,10 +464,7 @@ private:
        std::array<VibrationData, 2> vibration{};
        bool unintended_home_button_input_protection{};
        bool is_connected{};

        // Dual joycons can have only one side connected
        bool is_dual_left_connected{true};
        bool is_dual_right_connected{true};
        Core::HID::NpadStyleIndex npad_type{Core::HID::NpadStyleIndex::None};

        // Motion parameters
        bool sixaxis_at_rest{true};
@@ -293,8 +293,8 @@ Hid::Hid(Core::System& system_)
        {132, &Hid::EnableUnintendedHomeButtonInputProtection, "EnableUnintendedHomeButtonInputProtection"},
        {133, nullptr, "SetNpadJoyAssignmentModeSingleWithDestination"},
        {134, &Hid::SetNpadAnalogStickUseCenterClamp, "SetNpadAnalogStickUseCenterClamp"},
        {135, &Hid::SetNpadCaptureButtonAssignment, "SetNpadCaptureButtonAssignment"},
        {136, &Hid::ClearNpadCaptureButtonAssignment, "ClearNpadCaptureButtonAssignment"},
        {135, nullptr, "SetNpadCaptureButtonAssignment"},
        {136, nullptr, "ClearNpadCaptureButtonAssignment"},
        {200, &Hid::GetVibrationDeviceInfo, "GetVibrationDeviceInfo"},
        {201, &Hid::SendVibrationValue, "SendVibrationValue"},
        {202, &Hid::GetActualVibrationValue, "GetActualVibrationValue"},
@@ -975,35 +975,35 @@ void Hid::SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx
    const auto parameters{rp.PopRaw<Parameters>()};

    applet_resource->GetController<Controller_NPad>(HidController::NPad)
        .SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyDeviceType::Left,
                     Controller_NPad::NpadJoyAssignmentMode::Single);
        .SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyAssignmentMode::Single);

    LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
             parameters.applet_resource_user_id);
    LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}",
                parameters.npad_id, parameters.applet_resource_user_id);

    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
}

void Hid::SetNpadJoyAssignmentModeSingle(Kernel::HLERequestContext& ctx) {
    // TODO: Check the differences between this and SetNpadJoyAssignmentModeSingleByDefault
    IPC::RequestParser rp{ctx};
    struct Parameters {
        Core::HID::NpadIdType npad_id;
        INSERT_PADDING_WORDS_NOINIT(1);
        u64 applet_resource_user_id;
        Controller_NPad::NpadJoyDeviceType npad_joy_device_type;
        u64 npad_joy_device_type;
    };
    static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");

    const auto parameters{rp.PopRaw<Parameters>()};

    applet_resource->GetController<Controller_NPad>(HidController::NPad)
        .SetNpadMode(parameters.npad_id, parameters.npad_joy_device_type,
                     Controller_NPad::NpadJoyAssignmentMode::Single);
        .SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyAssignmentMode::Single);

    LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
             parameters.npad_id, parameters.applet_resource_user_id,
             parameters.npad_joy_device_type);
    LOG_WARNING(Service_HID,
                "(STUBBED) called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
                parameters.npad_id, parameters.applet_resource_user_id,
                parameters.npad_joy_device_type);

    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
@@ -1021,10 +1021,10 @@ void Hid::SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) {
    const auto parameters{rp.PopRaw<Parameters>()};

    applet_resource->GetController<Controller_NPad>(HidController::NPad)
        .SetNpadMode(parameters.npad_id, {}, Controller_NPad::NpadJoyAssignmentMode::Dual);
        .SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyAssignmentMode::Dual);

    LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id,
             parameters.applet_resource_user_id);
    LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}",
                parameters.npad_id, parameters.applet_resource_user_id);

    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
@@ -1186,37 +1186,6 @@ void Hid::SetNpadAnalogStickUseCenterClamp(Kernel::HLERequestContext& ctx) {
    rb.Push(ResultSuccess);
}

void Hid::SetNpadCaptureButtonAssignment(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    struct Parameters {
        Core::HID::NpadStyleSet npad_styleset;
        INSERT_PADDING_WORDS_NOINIT(1);
        u64 applet_resource_user_id;
        Core::HID::NpadButton button;
    };
    static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size.");

    const auto parameters{rp.PopRaw<Parameters>()};

    LOG_WARNING(Service_HID,
                "(STUBBED) called, npad_styleset={}, applet_resource_user_id={}, button={}",
                parameters.npad_styleset, parameters.applet_resource_user_id, parameters.button);

    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
}

void Hid::ClearNpadCaptureButtonAssignment(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    const auto applet_resource_user_id{rp.Pop<u64>()};

    LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
                applet_resource_user_id);

    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
}

void Hid::GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()};
@@ -136,8 +136,6 @@ private:
    void IsUnintendedHomeButtonInputProtectionEnabled(Kernel::HLERequestContext& ctx);
    void EnableUnintendedHomeButtonInputProtection(Kernel::HLERequestContext& ctx);
    void SetNpadAnalogStickUseCenterClamp(Kernel::HLERequestContext& ctx);
    void SetNpadCaptureButtonAssignment(Kernel::HLERequestContext& ctx);
    void ClearNpadCaptureButtonAssignment(Kernel::HLERequestContext& ctx);
    void GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx);
    void SendVibrationValue(Kernel::HLERequestContext& ctx);
    void GetActualVibrationValue(Kernel::HLERequestContext& ctx);
@@ -20,12 +20,8 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
    switch (command.group) {
    case 0x0:
        switch (command.cmd) {
        case 0x1: {
            if (!fd_to_id.contains(fd)) {
                fd_to_id[fd] = next_id++;
            }
            return Submit(fd, input, output);
        }
        case 0x1:
            return Submit(input, output);
        case 0x2:
            return GetSyncpoint(input, output);
        case 0x3:
@@ -70,10 +66,7 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) {}

void nvhost_nvdec::OnClose(DeviceFD fd) {
    LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
    const auto iter = fd_to_id.find(fd);
    if (iter != fd_to_id.end()) {
        system.GPU().ClearCdmaInstance(iter->second);
    }
    system.GPU().ClearCdmaInstance();
}

} // namespace Service::Nvidia::Devices
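The removed lines above had nvhost_nvdec track a per-fd decoder instance id, assigned lazily on first submit and looked up again on close so that only that stream's CDMA instance was cleared. A minimal sketch of that fd-to-instance bookkeeping (hypothetical types, not yuzu's classes):

```cpp
#include <cstdint>
#include <unordered_map>

using DeviceFD = std::int32_t;

// Sketch of per-fd instance ids, as in the removed fd_to_id bookkeeping.
class FdRegistry {
public:
    // Lazily assigns an instance id the first time an fd submits work.
    std::uint32_t GetOrAssign(DeviceFD fd) {
        const auto [iter, inserted] = fd_to_id.try_emplace(fd, next_id);
        if (inserted) {
            ++next_id;
        }
        return iter->second;
    }

    // On close, forgets the fd and reports which instance to tear down.
    bool Release(DeviceFD fd, std::uint32_t& out_id) {
        const auto iter = fd_to_id.find(fd);
        if (iter == fd_to_id.end()) {
            return false; // This fd never submitted anything.
        }
        out_id = iter->second;
        fd_to_id.erase(iter);
        return true;
    }

private:
    std::unordered_map<DeviceFD, std::uint32_t> fd_to_id;
    std::uint32_t next_id{};
};
```

Dropping the map means every close clears a single shared instance instead, which is simpler but conflates concurrent streams.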
@@ -24,9 +24,6 @@ public:

    void OnOpen(DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

private:
    u32 next_id{};
};

} // namespace Service::Nvidia::Devices

@@ -59,8 +59,7 @@ NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
    return NvResult::Success;
}

NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
                                     std::vector<u8>& output) {
NvResult nvhost_nvdec_common::Submit(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlSubmit params{};
    std::memcpy(&params, input.data(), sizeof(IoctlSubmit));
    LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count);
@@ -94,7 +93,7 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
        Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
        system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(),
                                  cmdlist.size() * sizeof(u32));
        gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
        gpu.PushCommandBuffer(cmdlist);
    }
    std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
    // Some games expect command_buffers to be written back

@@ -104,14 +104,13 @@ protected:

    /// Ioctl command implementations
    NvResult SetNVMAPfd(const std::vector<u8>& input);
    NvResult Submit(DeviceFD fd, const std::vector<u8>& input, std::vector<u8>& output);
    NvResult Submit(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult MapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);

    std::unordered_map<DeviceFD, u32> fd_to_id{};
    s32_le nvmap_fd{};
    u32_le submit_timeout{};
    std::shared_ptr<nvmap> nvmap_dev;
@@ -21,10 +21,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
    case 0x0:
        switch (command.cmd) {
        case 0x1:
            if (!fd_to_id.contains(fd)) {
                fd_to_id[fd] = next_id++;
            }
            return Submit(fd, input, output);
            return Submit(input, output);
        case 0x2:
            return GetSyncpoint(input, output);
        case 0x3:
@@ -68,10 +65,7 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i
void nvhost_vic::OnOpen(DeviceFD fd) {}

void nvhost_vic::OnClose(DeviceFD fd) {
    const auto iter = fd_to_id.find(fd);
    if (iter != fd_to_id.end()) {
        system.GPU().ClearCdmaInstance(iter->second);
    }
    system.GPU().ClearCdmaInstance();
}

} // namespace Service::Nvidia::Devices

@@ -23,8 +23,5 @@ public:

    void OnOpen(DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

private:
    u32 next_id{};
};
} // namespace Service::Nvidia::Devices
@@ -1,7 +1,3 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

@@ -125,9 +125,8 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
    }
    metadata.Print();

    const auto static_modules = {"rtld", "main", "subsdk0", "subsdk1", "subsdk2",
                                 "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7",
                                 "subsdk8", "subsdk9", "sdk"};
    const auto static_modules = {"rtld", "main", "subsdk0", "subsdk1", "subsdk2", "subsdk3",
                                 "subsdk4", "subsdk5", "subsdk6", "subsdk7", "sdk"};

    // Use the NSO module loader to figure out the code layout
    std::size_t code_size{};
@@ -1,5 +1,7 @@
add_library(shader_recompiler STATIC
    backend/bindings.h
    backend/glasm/emit_context.cpp
    backend/glasm/emit_context.h
    backend/glasm/emit_glasm.cpp
    backend/glasm/emit_glasm.h
    backend/glasm/emit_glasm_barriers.cpp
@@ -20,10 +22,10 @@ add_library(shader_recompiler STATIC
    backend/glasm/emit_glasm_special.cpp
    backend/glasm/emit_glasm_undefined.cpp
    backend/glasm/emit_glasm_warp.cpp
    backend/glasm/glasm_emit_context.cpp
    backend/glasm/glasm_emit_context.h
    backend/glasm/reg_alloc.cpp
    backend/glasm/reg_alloc.h
    backend/glsl/emit_context.cpp
    backend/glsl/emit_context.h
    backend/glsl/emit_glsl.cpp
    backend/glsl/emit_glsl.h
    backend/glsl/emit_glsl_atomic.cpp
@@ -45,10 +47,10 @@ add_library(shader_recompiler STATIC
    backend/glsl/emit_glsl_special.cpp
    backend/glsl/emit_glsl_undefined.cpp
    backend/glsl/emit_glsl_warp.cpp
    backend/glsl/glsl_emit_context.cpp
    backend/glsl/glsl_emit_context.h
    backend/glsl/var_alloc.cpp
    backend/glsl/var_alloc.h
    backend/spirv/emit_context.cpp
    backend/spirv/emit_context.h
    backend/spirv/emit_spirv.cpp
    backend/spirv/emit_spirv.h
    backend/spirv/emit_spirv_atomic.cpp
@@ -70,8 +72,6 @@ add_library(shader_recompiler STATIC
    backend/spirv/emit_spirv_special.cpp
    backend/spirv/emit_spirv_undefined.cpp
    backend/spirv/emit_spirv_warp.cpp
    backend/spirv/spirv_emit_context.cpp
    backend/spirv/spirv_emit_context.h
    environment.h
    exception.h
    frontend/ir/abstract_syntax_list.h
@@ -5,8 +5,8 @@
#include <string_view>

#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
@@ -9,9 +9,9 @@
#include "common/div_ceil.h"
#include "common/settings.h"
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/ir_emitter.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
@@ -1,22 +0,0 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
|
||||
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
|
||||
|
||||
namespace Shader::Backend::GLASM {
|
||||
|
||||
void EmitBarrier(EmitContext& ctx) {
|
||||
ctx.Add("BAR;");
|
||||
}
|
||||
|
||||
void EmitWorkgroupMemoryBarrier(EmitContext& ctx) {
|
||||
ctx.Add("MEMBAR.CTA;");
|
||||
}
|
||||
|
||||
void EmitDeviceMemoryBarrier(EmitContext& ctx) {
|
||||
ctx.Add("MEMBAR;");
|
||||
}
|
||||
|
||||
} // namespace Shader::Backend::GLASM
|
||||
|
||||
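Throughout the GLASM files in this diff, each `Emit*` function appends one line of ARB-assembly text to an `EmitContext` (the barrier emitters deleted above reappear in a consolidated file later in this diff). A minimal sketch of that mechanism, assuming `Add` is a thin fmt-based formatter — the mock below is illustrative only; the real class also performs register allocation:

```cpp
#include <string>
#include <utility>

#include <fmt/format.h>

// Illustrative mock: collects emitted GLASM lines into a string.
struct EmitContext {
    std::string code;

    template <typename... Args>
    void Add(fmt::format_string<Args...> format_str, Args&&... args) {
        code += fmt::format(format_str, std::forward<Args>(args)...);
        code += '\n';
    }
};

void EmitWorkgroupMemoryBarrier(EmitContext& ctx) {
    ctx.Add("MEMBAR.CTA;"); // same text the deleted file produced
}

int main() {
    EmitContext ctx;
    EmitWorkgroupMemoryBarrier(ctx);
    fmt::print("{}", ctx.code); // prints: MEMBAR.CTA;
}
```
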
@@ -2,8 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {

@@ -2,8 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {

@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/shader_info.h"
@@ -335,35 +335,6 @@ void EmitSetFragDepth(EmitContext& ctx, ScalarF32 value) {
    ctx.Add("MOV.F result.depth.z,{};", value);
}

void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {},invocation.groupid;", inst);
}

void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {},invocation.localid;", inst);
}

void EmitInvocationId(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,primitive_invocation.x;", inst);
}

void EmitSampleId(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,fragment.sampleid.x;", inst);
}

void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,fragment.helperthread.x;", inst);
}

void EmitYDirection(EmitContext& ctx, IR::Inst& inst) {
    ctx.uses_y_direction = true;
    ctx.Add("MOV.F {}.x,y_direction[0].w;", inst);
}

void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.F {}.x,scaling[0].z;", inst);
}

void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset) {
    ctx.Add("MOV.U {},lmem[{}].x;", inst, word_offset);
}

@@ -1,18 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"

namespace Shader::Backend::GLASM {

void EmitJoin(EmitContext&) {
    throw NotImplementedException("Join shouldn't be emitted");
}

void EmitDemoteToHelperInvocation(EmitContext& ctx) {
    ctx.Add("KIL TR.x;");
}

} // namespace Shader::Backend::GLASM

@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"

@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"

@@ -4,8 +4,8 @@

#include <utility>

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"

@@ -2,8 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {

@@ -1,26 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"

namespace Shader::Backend::GLASM {

void EmitLogicalOr(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("OR.S {},{},{};", inst, a, b);
}

void EmitLogicalAnd(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("AND.S {},{},{};", inst, a, b);
}

void EmitLogicalXor(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("XOR.S {},{},{};", inst, a, b);
}

void EmitLogicalNot(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
    ctx.Add("SEQ.S {},{},0;", inst, value);
}

} // namespace Shader::Backend::GLASM

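The deleted file above mapped IR logical operations directly onto GLASM integer instructions; note that logical NOT has no dedicated opcode and is expressed as set-if-equal-to-zero (`SEQ.S dst,value,0`). These emitters reappear in a consolidated file later in this diff. A small C++ model of the semantics, assuming the integer-comparison convention that true is written as all-ones (-1) and false as 0 — treat that exact value as an assumption about the target ISA:

```cpp
#include <cassert>
#include <cstdint>

// Boolean values live in integer scalars: 0 is false, non-zero is true.
// OR.S / AND.S / XOR.S map directly onto bitwise integer operations.
std::int32_t LogicalOr(std::int32_t a, std::int32_t b)  { return a | b; }
std::int32_t LogicalAnd(std::int32_t a, std::int32_t b) { return a & b; }
std::int32_t LogicalXor(std::int32_t a, std::int32_t b) { return a ^ b; }

// SEQ.S dst,value,0 writes "true" exactly when value equals zero,
// which is logical NOT under this representation.
std::int32_t LogicalNot(std::int32_t value) { return value == 0 ? -1 : 0; }

int main() {
    assert(LogicalNot(0) != 0);   // !false == true
    assert(LogicalNot(-1) == 0);  // !true  == false
    assert(LogicalAnd(-1, 0) == 0);
    assert(LogicalOr(0, -1) != 0);
    assert(LogicalXor(-1, -1) == 0);
}
```
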
@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/runtime_info.h"

@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/frontend/ir/value.h"

@@ -17,6 +17,110 @@ namespace Shader::Backend::GLASM {

#define NotImplemented() throw NotImplementedException("GLASM instruction {}", __LINE__)

static void DefinePhi(EmitContext& ctx, IR::Inst& phi) {
    switch (phi.Type()) {
    case IR::Type::U1:
    case IR::Type::U32:
    case IR::Type::F32:
        ctx.reg_alloc.Define(phi);
        break;
    case IR::Type::U64:
    case IR::Type::F64:
        ctx.reg_alloc.LongDefine(phi);
        break;
    default:
        throw NotImplementedException("Phi node type {}", phi.Type());
    }
}

void EmitPhi(EmitContext& ctx, IR::Inst& phi) {
    const size_t num_args{phi.NumArgs()};
    for (size_t i = 0; i < num_args; ++i) {
        ctx.reg_alloc.Consume(phi.Arg(i));
    }
    if (!phi.Definition<Id>().is_valid) {
        // The phi node wasn't forward defined
        DefinePhi(ctx, phi);
    }
}

void EmitVoid(EmitContext&) {}

void EmitReference(EmitContext& ctx, const IR::Value& value) {
    ctx.reg_alloc.Consume(value);
}

void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value& value) {
    IR::Inst& phi{RegAlloc::AliasInst(*phi_value.Inst())};
    if (!phi.Definition<Id>().is_valid) {
        // The phi node wasn't forward defined
        DefinePhi(ctx, phi);
    }
    const Register phi_reg{ctx.reg_alloc.Consume(IR::Value{&phi})};
    const Value eval_value{ctx.reg_alloc.Consume(value)};

    if (phi_reg == eval_value) {
        return;
    }
    switch (phi.Flags<IR::Type>()) {
    case IR::Type::U1:
    case IR::Type::U32:
    case IR::Type::F32:
        ctx.Add("MOV.S {}.x,{};", phi_reg, ScalarS32{eval_value});
        break;
    case IR::Type::U64:
    case IR::Type::F64:
        ctx.Add("MOV.U64 {}.x,{};", phi_reg, ScalarRegister{eval_value});
        break;
    default:
        throw NotImplementedException("Phi node type {}", phi.Type());
    }
}

void EmitJoin(EmitContext& ctx) {
    NotImplemented();
}

void EmitDemoteToHelperInvocation(EmitContext& ctx) {
    ctx.Add("KIL TR.x;");
}

void EmitBarrier(EmitContext& ctx) {
    ctx.Add("BAR;");
}

void EmitWorkgroupMemoryBarrier(EmitContext& ctx) {
    ctx.Add("MEMBAR.CTA;");
}

void EmitDeviceMemoryBarrier(EmitContext& ctx) {
    ctx.Add("MEMBAR;");
}

void EmitPrologue(EmitContext& ctx) {
    // TODO
}

void EmitEpilogue(EmitContext& ctx) {
    // TODO
}

void EmitEmitVertex(EmitContext& ctx, ScalarS32 stream) {
    if (stream.type == Type::U32 && stream.imm_u32 == 0) {
        ctx.Add("EMIT;");
    } else {
        ctx.Add("EMITS {};", stream);
    }
}

void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
    if (!stream.IsImmediate()) {
        LOG_WARNING(Shader_GLASM, "Stream is not immediate");
    }
    ctx.reg_alloc.Consume(stream);
    ctx.Add("ENDPRIM;");
}

void EmitGetRegister(EmitContext& ctx) {
    NotImplemented();
}
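The `DefinePhi`/`EmitPhiMove` pair added above is the standard pattern for leaving SSA form: a phi node is assigned one backing register, and each predecessor block moves its incoming value into that register before branching, skipping the move when register allocation already placed both in the same register. A self-contained sketch of the idea, deliberately independent of yuzu's types (`Register` and the printed mnemonic are illustrative):

```cpp
#include <cstdio>

// Minimal SSA-exit sketch: 'phi_reg' is the single register backing a phi
// node; each predecessor copies its incoming value into it before jumping.
struct Register {
    int index;
};

bool operator==(Register a, Register b) {
    return a.index == b.index;
}

void EmitPhiMove(Register phi_reg, Register incoming) {
    if (phi_reg == incoming) {
        return; // value already lives in the phi register: no MOV needed
    }
    std::printf("MOV.S R%d.x,R%d;\n", phi_reg.index, incoming.index);
}

int main() {
    const Register phi{0};
    EmitPhiMove(phi, Register{1}); // predecessor A: emits a MOV
    EmitPhiMove(phi, Register{0}); // predecessor B: elided, same register
}
```
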
@@ -81,6 +185,55 @@ void EmitSetOFlag(EmitContext& ctx) {
    NotImplemented();
}

void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {},invocation.groupid;", inst);
}

void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {},invocation.localid;", inst);
}

void EmitInvocationId(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,primitive_invocation.x;", inst);
}

void EmitSampleId(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,fragment.sampleid.x;", inst);
}

void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,fragment.helperthread.x;", inst);
}

void EmitYDirection(EmitContext& ctx, IR::Inst& inst) {
    ctx.uses_y_direction = true;
    ctx.Add("MOV.F {}.x,y_direction[0].w;", inst);
}

void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.F {}.x,scaling[0].z;", inst);
}

void EmitUndefU1(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,0;", inst);
}

void EmitUndefU8(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,0;", inst);
}

void EmitUndefU16(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,0;", inst);
}

void EmitUndefU32(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,0;", inst);
}

void EmitUndefU64(EmitContext& ctx, IR::Inst& inst) {
    ctx.LongAdd("MOV.S64 {}.x,0;", inst);
}

void EmitGetZeroFromOp(EmitContext& ctx) {
    NotImplemented();
}
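This hunk also folds the `EmitUndef*` family into the same file: undefined IR values are materialized as zero rather than left as whatever the register happened to contain, which keeps the generated program deterministic. A trivial sketch of that policy (the function name is illustrative, not yuzu's):

```cpp
#include <cstdint>
#include <cstdio>

// Policy sketch: an "undefined" SSA value still needs concrete register
// contents, so the backend pins it to zero instead of reading garbage.
std::uint32_t MaterializeUndefU32() {
    return 0; // mirrors "MOV.S {}.x,0;" in the emitter above
}

int main() {
    std::printf("undef -> %u\n", MaterializeUndefU32());
}
```
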
@@ -105,4 +258,20 @@ void EmitGetInBoundsFromOp(EmitContext& ctx) {
    NotImplemented();
}

void EmitLogicalOr(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("OR.S {},{},{};", inst, a, b);
}

void EmitLogicalAnd(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("AND.S {},{},{};", inst, a, b);
}

void EmitLogicalXor(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    ctx.Add("XOR.S {},{},{};", inst, a, b);
}

void EmitLogicalNot(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
    ctx.Add("SEQ.S {},{},0;", inst, value);
}

} // namespace Shader::Backend::GLASM

@@ -3,8 +3,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {

@@ -3,8 +3,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {

@@ -1,95 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {

static void DefinePhi(EmitContext& ctx, IR::Inst& phi) {
    switch (phi.Type()) {
    case IR::Type::U1:
    case IR::Type::U32:
    case IR::Type::F32:
        ctx.reg_alloc.Define(phi);
        break;
    case IR::Type::U64:
    case IR::Type::F64:
        ctx.reg_alloc.LongDefine(phi);
        break;
    default:
        throw NotImplementedException("Phi node type {}", phi.Type());
    }
}

void EmitPhi(EmitContext& ctx, IR::Inst& phi) {
    const size_t num_args{phi.NumArgs()};
    for (size_t i = 0; i < num_args; ++i) {
        ctx.reg_alloc.Consume(phi.Arg(i));
    }
    if (!phi.Definition<Id>().is_valid) {
        // The phi node wasn't forward defined
        DefinePhi(ctx, phi);
    }
}

void EmitVoid(EmitContext&) {}

void EmitReference(EmitContext& ctx, const IR::Value& value) {
    ctx.reg_alloc.Consume(value);
}

void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value& value) {
    IR::Inst& phi{RegAlloc::AliasInst(*phi_value.Inst())};
    if (!phi.Definition<Id>().is_valid) {
        // The phi node wasn't forward defined
        DefinePhi(ctx, phi);
    }
    const Register phi_reg{ctx.reg_alloc.Consume(IR::Value{&phi})};
    const Value eval_value{ctx.reg_alloc.Consume(value)};

    if (phi_reg == eval_value) {
        return;
    }
    switch (phi.Flags<IR::Type>()) {
    case IR::Type::U1:
    case IR::Type::U32:
    case IR::Type::F32:
        ctx.Add("MOV.S {}.x,{};", phi_reg, ScalarS32{eval_value});
        break;
    case IR::Type::U64:
    case IR::Type::F64:
        ctx.Add("MOV.U64 {}.x,{};", phi_reg, ScalarRegister{eval_value});
        break;
    default:
        throw NotImplementedException("Phi node type {}", phi.Type());
    }
}

void EmitPrologue(EmitContext&) {
    // TODO
}

void EmitEpilogue(EmitContext&) {
    // TODO
}

void EmitEmitVertex(EmitContext& ctx, ScalarS32 stream) {
    if (stream.type == Type::U32 && stream.imm_u32 == 0) {
        ctx.Add("EMIT;");
    } else {
        ctx.Add("EMITS {};", stream);
    }
}

void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
    if (!stream.IsImmediate()) {
        LOG_WARNING(Shader_GLASM, "Stream is not immediate");
    }
    ctx.reg_alloc.Consume(stream);
    ctx.Add("ENDPRIM;");
}

} // namespace Shader::Backend::GLASM

@@ -1,30 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"

namespace Shader::Backend::GLASM {

void EmitUndefU1(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,0;", inst);
}

void EmitUndefU8(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,0;", inst);
}

void EmitUndefU16(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,0;", inst);
}

void EmitUndefU32(EmitContext& ctx, IR::Inst& inst) {
    ctx.Add("MOV.S {}.x,0;", inst);
}

void EmitUndefU64(EmitContext& ctx, IR::Inst& inst) {
    ctx.LongAdd("MOV.S64 {}.x,0;", inst);
}

} // namespace Shader::Backend::GLASM

@@ -2,8 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"

@@ -6,7 +6,7 @@

#include <fmt/format.h>

#include "shader_recompiler/backend/glasm/glasm_emit_context.h"
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/reg_alloc.h"
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/ir/value.h"

@@ -3,7 +3,7 @@
// Refer to the license.txt file included.

#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/backend/glsl/glsl_emit_context.h"
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
@@ -9,9 +9,9 @@

#include "common/div_ceil.h"
#include "common/settings.h"
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/backend/glsl/glsl_emit_context.h"
#include "shader_recompiler/frontend/ir/ir_emitter.h"

namespace Shader::Backend::GLSL {

@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/backend/glsl/glsl_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLSL {

@@ -2,8 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/backend/glsl/glsl_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLSL {

@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/backend/glsl/glsl_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLSL {

@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/backend/glsl/glsl_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLSL {

@@ -4,8 +4,8 @@

#include <string_view>

#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/backend/glsl/glsl_emit_context.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"

Some files were not shown because too many files have changed in this diff