Compare commits


74 Commits

Author SHA1 Message Date
Liam
6a0d8b2aa1 service_thread: fix deletion 2022-11-06 19:50:51 -05:00
Fernando S
dc520a487d Merge pull request #9195 from vonchenplus/vmm_kinds_error
video_core: Fix vmm kinds size error
2022-11-06 15:34:24 +01:00
FengChen
aa97f39ba8 video_core: Fix vmm kinds size error 2022-11-06 22:31:22 +08:00
Fernando S
df38c03a09 Merge pull request #9163 from vonchenplus/draw_error
video_core: Fix drawing trigger mechanism regression
2022-11-06 01:13:59 +01:00
liamwhite
4c198bbf06 Merge pull request #9173 from bunnei/kern-update-15
Kernel: Various updates for FW 15.0.x
2022-11-05 13:25:29 -04:00
Liam
cf0f821565 core: hle: kernel: Address review comments. 2022-11-05 12:23:47 -04:00
Morph
8baf036cdc Merge pull request #9189 from vonchenplus/stupid
video_core: Fix scaling graphical regressions for multiple games
2022-11-05 11:57:52 -04:00
FengChen
7283010305 video_core: Fix scaling graphical regressions for multiple games 2022-11-05 17:21:02 +08:00
Mai
6f6be615f3 Merge pull request #9181 from jbeich/freebsd-qt-parity
Qt: enable recent Linux features on BSDs
2022-11-04 17:54:17 +00:00
Mai
862afa8514 Merge pull request #9178 from jbeich/freebsd-include
network: unbreak on BSDs due to missing include
2022-11-04 17:52:01 +00:00
Jan Beich
d7d7ae8219 Qt: enable recent Linux features on more Unices
- Prevent sleep via xdg-desktop-portal after fa7abafa5f
- Pause on suspend after b7642cff36
- Exit on SIGINT/SIGTERM after 9479940a1f
- Improve dark themes after b51db12567
2022-11-04 13:01:17 +00:00
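The "Exit on SIGINT/SIGTERM" item above is a pattern worth sketching: POSIX signal handlers must not call into Qt directly, so applications usually forward the signal into the event loop through a socketpair and a QSocketNotifier. A minimal sketch of that pattern, assuming Qt 5.15 and POSIX APIs (illustrative only, not yuzu's actual implementation):

// Illustrative sketch (not yuzu's code): forward SIGINT/SIGTERM into the Qt
// event loop with the classic socketpair trick, so the app can exit cleanly.
#include <csignal>
#include <sys/socket.h>
#include <unistd.h>
#include <QApplication>
#include <QSocketNotifier>

static int sig_fds[2];

static void PosixSignalHandler(int) {
    const char byte = 1;
    // write() is async-signal-safe; this wakes the notifier on the Qt side.
    (void)write(sig_fds[0], &byte, sizeof(byte));
}

int main(int argc, char** argv) {
    QApplication app(argc, argv);

    socketpair(AF_UNIX, SOCK_STREAM, 0, sig_fds);
    std::signal(SIGINT, PosixSignalHandler);
    std::signal(SIGTERM, PosixSignalHandler);

    QSocketNotifier notifier(sig_fds[1], QSocketNotifier::Read);
    QObject::connect(&notifier, qOverload<int>(&QSocketNotifier::activated),
                     &app, [&](int) {
                         char byte;
                         (void)read(sig_fds[1], &byte, sizeof(byte));
                         app.quit(); // leave the event loop instead of dying mid-frame
                     });

    return app.exec();
}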
Jan Beich
717c8ded82 network: add missing header for SO_* on Unix after f80c7c4cd5
src/core/internal_network/socket_proxy.cpp: In member function 'virtual Network::Errno Network::ProxySocket::Initialize(Network::Domain, Network::Type, Network::Protocol)':
src/core/internal_network/socket_proxy.cpp:51:20: error: 'SO_TYPE' was not declared in this scope
   51 |     SetSockOpt(fd, SO_TYPE, type);
      |                    ^~~~~~~
src/core/internal_network/socket_proxy.cpp: In member function 'virtual Network::Errno Network::ProxySocket::SetLinger(bool, u32)':
src/core/internal_network/socket_proxy.cpp:253:27: error: 'SO_LINGER' was not declared in this scope
  253 |     return SetSockOpt(fd, SO_LINGER, values);
      |                           ^~~~~~~~~
src/core/internal_network/socket_proxy.cpp: In member function 'virtual Network::Errno Network::ProxySocket::SetReuseAddr(bool)':
src/core/internal_network/socket_proxy.cpp:257:32: error: 'SO_REUSEADDR' was not declared in this scope
  257 |     return SetSockOpt<u32>(fd, SO_REUSEADDR, enable ? 1 : 0);
      |                                ^~~~~~~~~~~~
src/core/internal_network/socket_proxy.cpp: In member function 'virtual Network::Errno Network::ProxySocket::SetBroadcast(bool)':
src/core/internal_network/socket_proxy.cpp:262:32: error: 'SO_BROADCAST' was not declared in this scope
  262 |     return SetSockOpt<u32>(fd, SO_BROADCAST, enable ? 1 : 0);
      |                                ^~~~~~~~~~~~
src/core/internal_network/socket_proxy.cpp: In member function 'virtual Network::Errno Network::ProxySocket::SetSndBuf(u32)':
src/core/internal_network/socket_proxy.cpp:266:27: error: 'SO_SNDBUF' was not declared in this scope
  266 |     return SetSockOpt(fd, SO_SNDBUF, value);
      |                           ^~~~~~~~~
src/core/internal_network/socket_proxy.cpp: In member function 'virtual Network::Errno Network::ProxySocket::SetRcvBuf(u32)':
src/core/internal_network/socket_proxy.cpp:274:27: error: 'SO_RCVBUF' was not declared in this scope
  274 |     return SetSockOpt(fd, SO_RCVBUF, value);
      |                           ^~~~~~~~~
src/core/internal_network/socket_proxy.cpp: In member function 'virtual Network::Errno Network::ProxySocket::SetSndTimeo(u32)':
src/core/internal_network/socket_proxy.cpp:279:27: error: 'SO_SNDTIMEO' was not declared in this scope
  279 |     return SetSockOpt(fd, SO_SNDTIMEO, static_cast<int>(value));
      |                           ^~~~~~~~~~~
src/core/internal_network/socket_proxy.cpp: In member function 'virtual Network::Errno Network::ProxySocket::SetRcvTimeo(u32)':
src/core/internal_network/socket_proxy.cpp:284:27: error: 'SO_RCVTIMEO' was not declared in this scope
  284 |     return SetSockOpt(fd, SO_RCVTIMEO, static_cast<int>(value));
      |                           ^~~~~~~~~~~
2022-11-04 07:28:46 +00:00
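The errors above all stem from SO_* constants that <sys/socket.h> provides on Unix-like systems. The fix presumably adds a platform-guarded include along these lines (a sketch inferred from the log, not the verbatim patch):

#ifdef _WIN32
#include <winsock2.h>   // SO_* constants on Windows
#else
#include <sys/socket.h> // SO_TYPE, SO_LINGER, SO_REUSEADDR, SO_BROADCAST,
                        // SO_SNDBUF, SO_RCVBUF, SO_SNDTIMEO, SO_RCVTIMEO
#endif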
gidoly
9fc1bcc7b2 Update shader cache version. (#9175) 2022-11-04 03:16:01 -04:00
Feng Chen
75596c07e0 video_core: Fix SNORM texture buffer emulating error (#9001) 2022-11-04 02:39:42 -04:00
Piplup
ece22fcbc7 UI: Add options to hide extra columns (#9093)
UI change that allows the user to hide the size and/or file type columns
2022-11-04 02:30:39 -04:00
bunnei
38e4382f53 Merge pull request #8858 from vonchenplus/mipmap
video_core: Generate mipmap texture by drawing
2022-11-03 22:21:58 -07:00
bunnei
37de88040c Merge pull request #9135 from liamwhite/service-thread-event
kernel: invert session request handling flow
2022-11-03 21:45:56 -07:00
bunnei
05ae0cab0e core: hle: kernel: k_page_table: Remove unnecessary casts. 2022-11-03 21:17:08 -07:00
bunnei
119315af08 core: hle: kernel: k_page_table: Manually open/close pages for IPC methods. 2022-11-03 21:17:08 -07:00
bunnei
661fe06d9d core: hle: kernel: k_page_table: Implement IPC memory methods. 2022-11-03 21:17:07 -07:00
bunnei
ba21ba0c5c core: hle: kernel: k_memory_manager: Refresh. 2022-11-03 21:17:07 -07:00
bunnei
32d7faafa8 core: hle: kernel: Integrate system KSystemResource. 2022-11-03 21:17:07 -07:00
bunnei
b7b47f3099 core: hle: kernel: k_dynamic_page_manager: Refresh. 2022-11-03 21:17:07 -07:00
bunnei
6f941121e6 core: hle: kernel: Add KSystemResource. 2022-11-03 21:17:07 -07:00
bunnei
6636b81573 core: hle: kernel: k_handle_table: Refresh. 2022-11-03 21:17:07 -07:00
bunnei
1f21fa866d core: hle: kernel: k_memory_layout: Refresh. 2022-11-03 21:17:07 -07:00
bunnei
84d130f143 core: hle: kernel: k_memory_region_type: Refresh. 2022-11-03 21:17:07 -07:00
bunnei
d928ba8e40 core: hle: kernel: slab_helpers: Add KAutoObjectWithSlabHeap. 2022-11-03 21:17:06 -07:00
bunnei
3aab7d4473 core: hle: kernel: k_dynamic_resource_manager: Add KBlockInfoManager, KBlockInfoSlabHeap. 2022-11-03 21:17:06 -07:00
bunnei
6b6c02f541 core: hle: kernel: k_page_bitmap: Refresh. 2022-11-03 21:17:06 -07:00
bunnei
50bfacca88 core: hle: kernel: k_memory_block: Refresh. 2022-11-03 21:17:06 -07:00
bunnei
0cb9bc12fc core: hle: kernel: k_page_heap: Refresh. 2022-11-03 21:17:06 -07:00
bunnei
6257461684 core: hle: kernel: k_page_group: Add KPageBufferSlabHeap. 2022-11-03 21:17:06 -07:00
bunnei
d353c45f7d core: hle: kernel: k_system_control: Add SecureAppletMemorySize. 2022-11-03 21:17:06 -07:00
bunnei
f76b4417e6 core: hle: kernel: k_page_buffer: Add KPageBufferSlabHeap. 2022-11-03 21:17:06 -07:00
bunnei
0897f4f96c core: hle: kernel: Add KPageTableManager. 2022-11-03 21:17:06 -07:00
bunnei
6d4f411c08 core: hle: kernel: Add KPageTableSlabHeap. 2022-11-03 21:17:06 -07:00
bunnei
37b17252d1 core: hle: kernel: Add KEventInfo. 2022-11-03 21:17:06 -07:00
bunnei
ddd3f48736 core: hle: kernel: Add KDebug. 2022-11-03 21:17:06 -07:00
bunnei
46322be735 core: hle: result: Fix code for compilers. 2022-11-03 21:17:06 -07:00
Fernando S
3794851f7f Merge pull request #9154 from liamwhite/new-fb
vk_blit_screen: recreate swapchain images on guest format change
2022-11-04 01:25:34 +01:00
Morph
74275d0968 Merge pull request #9097 from liamwhite/intel-spv-compiler
video_core: don't build ASTC decoder shader unless requested
2022-11-03 19:29:33 -04:00
bunnei
ca0d9ef4b8 Merge pull request #9166 from Docteh/tx-update-20221102021425
Manually import Ukrainian language file
2022-11-02 22:23:52 -07:00
bunnei
846b6fba82 Merge pull request #9157 from yuzu-emu/acc-stored-users
core: hle: service: acc: Fix ListOpenContextStoredUsers/StoreOpenContext.
2022-11-02 19:42:04 -07:00
bunnei
75ab52f05b core: hle: service: acc: Fix ListOpenContextStoredUsers/StoreOpenContext.
- These APIs are used to capture the opened users and allow that state to be persisted across processes.
- They are not intended to just return the system opened users, that is what ListOpenUsers is for.
- Fixes the launch hang with Bayonetta 3.
2022-11-02 16:09:30 -07:00
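A rough sketch of the distinction the message draws (type and member names here are illustrative, not yuzu's actual service code): StoreOpenContext captures a snapshot of the currently opened users, and ListOpenContextStoredUsers returns that snapshot rather than the live list.

#include <vector>

using UserId = unsigned long long; // stand-in for the 128-bit account UUID

struct AccountState {
    std::vector<UserId> open_users;          // users currently opened
    std::vector<UserId> stored_open_context; // snapshot persisted across processes

    void StoreOpenContext() {
        stored_open_context = open_users; // capture the opened users now
    }
    std::vector<UserId> ListOpenContextStoredUsers() const {
        return stored_open_context; // the captured snapshot...
    }
    std::vector<UserId> ListOpenUsers() const {
        return open_users; // ...whereas this returns the live open users
    }
};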
bunnei
83f649240e Merge pull request #9171 from driskiou/fix-menu-separator
remove unnecessary separator in file menu (main.ui)
2022-11-02 10:17:39 -07:00
Ludovic
f325fcb131 remove unnecessary separator in file menu (main.ui) 2022-11-02 17:33:53 +01:00
Kyle Kienapfel
5e4ab2d42c Manually import Ukrainian language files
I'm not sure if GillianMC and I can claim that yuzu is the first emulator translated to Ukrainian until the language files are used in builds.
2022-11-02 02:22:18 -07:00
liamwhite
de4afde065 Merge pull request #9143 from K0bin/scheduler-empty
vk_scheduler: Remove recorded_counts
2022-10-31 21:37:46 -04:00
Liam
77b74f5d95 sm: avoid excessive port recreation 2022-10-31 17:47:39 -04:00
Liam
633411c20f kernel: fix single core for service threads 2022-10-31 17:45:46 -04:00
Liam
2228383322 kernel: fix port tracking 2022-10-31 17:45:46 -04:00
Liam
7aa91c8d9c k_server_session: add SendReplyHLE 2022-10-31 17:45:45 -04:00
Liam
7837185f0a service_thread: convert to map for session management 2022-10-31 17:44:07 -04:00
Liam
983f2b7074 kernel: invert session request handling flow 2022-10-31 17:44:06 -04:00
bunnei
7f0d0dd177 Merge pull request #9159 from liamwhite/kbork
kernel: more complete fix for KPort reference counting
2022-10-31 11:18:17 -07:00
FengChen
b42b894785 video_core: Fix drawing trigger mechanism regression 2022-10-31 21:57:38 +08:00
Liam
4e9adae5da kernel: more complete fix for KPort reference counting 2022-10-31 08:23:29 -04:00
bunnei
f39d2cf78b Merge pull request #9155 from FernandoS27/goosfraba
Vulkan: Fix regression caused by limiting render area to width/height of render targets.
2022-10-30 17:34:28 -07:00
Fernando S
d8ff939edc Merge pull request #9158 from liamwhite/single-bore
k_thread: fix single core
2022-10-31 00:58:07 +01:00
Liam
eec3184bb0 k_thread: fix single core 2022-10-30 18:44:29 -04:00
Fernando Sahmkow
67e0d38152 Vulkan: Fix regression caused by limiting render area to width/height of render targets. 2022-10-30 21:24:28 +01:00
Liam
808e22984f vk_blit_screen: recreate swapchain images on guest format change 2022-10-30 15:04:16 -04:00
Morph
e09756b2df Merge pull request #9151 from liamwhite/dram-size
kernel: reinitialize after dram layout change
2022-10-30 13:23:57 -04:00
Liam
8f00c59462 kernel: reinitialize after dram layout change 2022-10-30 11:01:22 -04:00
liamwhite
1cdd2d5204 Merge pull request #9091 from Docteh/what_compat_list
UI: Add option to hide the compatibility list
2022-10-30 09:17:16 -04:00
bunnei
ccfdb7c1af Merge pull request #9149 from german77/volum
service: am: Stub SetRecordVolumeMuted
2022-10-29 23:36:15 -07:00
german77
6f0f7f1547 service: am: Stub SetRecordVolumeMuted
Used by Bayonetta 3
2022-10-30 00:34:33 -05:00
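Such stubs typically follow yuzu's usual HLE pattern: log the call, consume the input, and report success. A hypothetical sketch (the interface name and exact parameters are assumptions, not the actual patch):

void IAudioController::SetRecordVolumeMuted(Kernel::HLERequestContext& ctx) {
    IPC::RequestParser rp{ctx};
    const bool muted = rp.Pop<bool>();

    LOG_WARNING(Service_AM, "(STUBBED) called, muted={}", muted);

    // Report success without actually muting anything yet.
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
}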
bunnei
316a2c1715 Merge pull request #9148 from liamwhite/crab-language-at-home
k_server_session: fix crashes
2022-10-29 21:03:53 -07:00
Robin Kertels
dce242858a vk_scheduler: Remove recorded_counts 2022-10-28 03:42:43 +02:00
Liam
9524e28d20 video_core: don't build ASTC decoder shader unless requested 2022-10-19 18:52:42 -04:00
Kyle Kienapfel
470e89a8ed UI: Add option to hide the compatibility list
Option is added directly below the option for the addons column

Defaults to hiding the compatibility list; changing the default works properly.

Co-authored-by: Piplup <piplup55@users.noreply.github.com>
2022-10-19 03:51:51 -07:00
Feng Chen
c864cb5772 Merge branch 'master' into mipmap 2022-09-20 11:56:43 +08:00
FengChen
9a95c7fa14 video_core: Generate mipmap texture by drawing 2022-09-20 11:55:43 +08:00
126 changed files with 11273 additions and 1274 deletions


@@ -218,11 +218,11 @@ if(ENABLE_QT)
set(QT_VERSION 5.15)
# Check for system Qt on Linux, fallback to bundled Qt
if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
if (UNIX AND NOT APPLE)
if (NOT YUZU_USE_BUNDLED_QT)
find_package(Qt5 ${QT_VERSION} COMPONENTS Widgets DBus Multimedia)
endif()
if (NOT Qt5_FOUND OR YUZU_USE_BUNDLED_QT)
if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND (NOT Qt5_FOUND OR YUZU_USE_BUNDLED_QT))
# Check for dependencies, then enable bundled Qt download
# Check that the system GLIBCXX version is compatible
@@ -323,7 +323,7 @@ if(ENABLE_QT)
set(YUZU_QT_NO_CMAKE_SYSTEM_PATH "NO_CMAKE_SYSTEM_PATH")
endif()
if ((${CMAKE_SYSTEM_NAME} STREQUAL "Linux") AND YUZU_USE_BUNDLED_QT)
if (UNIX AND NOT APPLE AND YUZU_USE_BUNDLED_QT)
find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets Concurrent Multimedia DBus ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
else()
find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets Concurrent Multimedia ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})

dist/languages/uk.ts (vendored, new file, 7321 lines added)

File diff suppressed because it is too large.


@@ -190,11 +190,13 @@ add_library(core STATIC
hle/kernel/k_code_memory.h
hle/kernel/k_condition_variable.cpp
hle/kernel/k_condition_variable.h
hle/kernel/k_debug.h
hle/kernel/k_dynamic_page_manager.h
hle/kernel/k_dynamic_resource_manager.h
hle/kernel/k_dynamic_slab_heap.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_event_info.h
hle/kernel/k_handle_table.cpp
hle/kernel/k_handle_table.h
hle/kernel/k_interrupt_manager.cpp
@@ -222,6 +224,8 @@ add_library(core STATIC
hle/kernel/k_page_group.h
hle/kernel/k_page_table.cpp
hle/kernel/k_page_table.h
hle/kernel/k_page_table_manager.h
hle/kernel/k_page_table_slab_heap.h
hle/kernel/k_port.cpp
hle/kernel/k_port.h
hle/kernel/k_priority_queue.h
@@ -254,6 +258,8 @@ add_library(core STATIC
hle/kernel/k_synchronization_object.cpp
hle/kernel/k_synchronization_object.h
hle/kernel/k_system_control.h
hle/kernel/k_system_resource.cpp
hle/kernel/k_system_resource.h
hle/kernel/k_thread.cpp
hle/kernel/k_thread.h
hle/kernel/k_thread_local_page.cpp


@@ -137,6 +137,7 @@ struct System::Impl {
device_memory = std::make_unique<Core::DeviceMemory>();
is_multicore = Settings::values.use_multi_core.GetValue();
extended_memory_layout = Settings::values.use_extended_memory_layout.GetValue();
core_timing.SetMulticore(is_multicore);
core_timing.Initialize([&system]() { system.RegisterHostThread(); });
@@ -166,13 +167,18 @@ struct System::Impl {
}
void ReinitializeIfNecessary(System& system) {
if (is_multicore == Settings::values.use_multi_core.GetValue()) {
const bool must_reinitialize =
is_multicore != Settings::values.use_multi_core.GetValue() ||
extended_memory_layout != Settings::values.use_extended_memory_layout.GetValue();
if (!must_reinitialize) {
return;
}
LOG_DEBUG(Kernel, "Re-initializing");
is_multicore = Settings::values.use_multi_core.GetValue();
extended_memory_layout = Settings::values.use_extended_memory_layout.GetValue();
Initialize(system);
}
@@ -521,6 +527,7 @@ struct System::Impl {
bool is_multicore{};
bool is_async_gpu{};
bool extended_memory_layout{};
ExecuteProgramCallback execute_program_callback;
ExitCallback exit_callback;


@@ -86,13 +86,13 @@ public:
u32 num_domain_objects{};
const bool always_move_handles{
(static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
if (!ctx.Session()->GetSessionRequestManager()->IsDomain() || always_move_handles) {
if (!ctx.GetManager()->IsDomain() || always_move_handles) {
num_handles_to_move = num_objects_to_move;
} else {
num_domain_objects = num_objects_to_move;
}
if (ctx.Session()->GetSessionRequestManager()->IsDomain()) {
if (ctx.GetManager()->IsDomain()) {
raw_data_size +=
static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
ctx.write_size += num_domain_objects;
@@ -125,8 +125,7 @@ public:
if (!ctx.IsTipc()) {
AlignWithPadding();
if (ctx.Session()->GetSessionRequestManager()->IsDomain() &&
ctx.HasDomainMessageHeader()) {
if (ctx.GetManager()->IsDomain() && ctx.HasDomainMessageHeader()) {
IPC::DomainMessageHeader domain_header{};
domain_header.num_objects = num_domain_objects;
PushRaw(domain_header);
@@ -146,18 +145,18 @@ public:
template <class T>
void PushIpcInterface(std::shared_ptr<T> iface) {
if (context->Session()->GetSessionRequestManager()->IsDomain()) {
if (context->GetManager()->IsDomain()) {
context->AddDomainObject(std::move(iface));
} else {
kernel.CurrentProcess()->GetResourceLimit()->Reserve(
Kernel::LimitableResource::Sessions, 1);
auto* session = Kernel::KSession::Create(kernel);
session->Initialize(nullptr, iface->GetServiceName(),
std::make_shared<Kernel::SessionRequestManager>(kernel));
session->Initialize(nullptr, iface->GetServiceName());
iface->RegisterSession(&session->GetServerSession(),
std::make_shared<Kernel::SessionRequestManager>(kernel));
context->AddMoveObject(&session->GetClientSession());
iface->ClientConnected(&session->GetServerSession());
}
}
@@ -387,7 +386,7 @@ public:
template <class T>
std::weak_ptr<T> PopIpcInterface() {
ASSERT(context->Session()->GetSessionRequestManager()->IsDomain());
ASSERT(context->GetManager()->IsDomain());
ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
return context->GetDomainHandler<T>(Pop<u32>() - 1);
}
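For context on the branch above, a typical service handler returns a new sub-interface to the guest like this (a hypothetical sketch; the interface and handler names are invented, but ResponseBuilder's shape matches the diff):

// Hypothetical handler: pushes a freshly constructed interface back to the
// guest; PushIpcInterface (above) then routes it either into the domain's
// object table or into a newly created KSession.
void ISomeService::OpenSubInterface(Kernel::HLERequestContext& ctx) {
    IPC::ResponseBuilder rb{ctx, 2, 0, 1}; // 2 data words, 1 moved object
    rb.Push(ResultSuccess);
    rb.PushIpcInterface<ISubInterface>(std::make_shared<ISubInterface>(system));
}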


@@ -8,6 +8,10 @@
namespace Kernel::Board::Nintendo::Nx {
class KSystemControl {
public:
// This can be overridden as needed.
static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB
public:
class Init {
public:


@@ -16,6 +16,7 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
@@ -26,18 +27,28 @@ namespace Kernel {
SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
ServiceThreadType thread_type)
: kernel{kernel_} {
if (thread_type == ServiceThreadType::CreateNew) {
service_thread = kernel.CreateServiceThread(service_name_);
} else {
service_thread = kernel.GetDefaultServiceThread();
}
}
: kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew
? kernel.CreateServiceThread(service_name_)
: kernel.GetDefaultServiceThread()} {}
SessionRequestHandler::~SessionRequestHandler() {
kernel.ReleaseServiceThread(service_thread);
}
void SessionRequestHandler::AcceptSession(KServerPort* server_port) {
auto* server_session = server_port->AcceptSession();
ASSERT(server_session != nullptr);
RegisterSession(server_session, std::make_shared<SessionRequestManager>(kernel));
}
void SessionRequestHandler::RegisterSession(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager) {
manager->SetSessionHandler(shared_from_this());
service_thread.RegisterServerSession(server_session, manager);
server_session->Close();
}
SessionRequestManager::SessionRequestManager(KernelCore& kernel_) : kernel{kernel_} {}
SessionRequestManager::~SessionRequestManager() = default;
@@ -92,7 +103,7 @@ Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_ses
}
// Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
context.SetSessionRequestManager(server_session->GetSessionRequestManager());
ASSERT(context.GetManager().get() == this);
// If there is a DomainMessageHeader, then this is CommandType "Request"
const auto& domain_message_header = context.GetDomainMessageHeader();
@@ -130,31 +141,6 @@ Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_ses
return ResultSuccess;
}
Result SessionRequestManager::QueueSyncRequest(KSession* parent,
std::shared_ptr<HLERequestContext>&& context) {
// Ensure we have a session request handler
if (this->HasSessionRequestHandler(*context)) {
if (auto strong_ptr = this->GetServiceThread().lock()) {
strong_ptr->QueueSyncRequest(*parent, std::move(context));
} else {
ASSERT_MSG(false, "strong_ptr is nullptr!");
}
} else {
ASSERT_MSG(false, "handler is invalid!");
}
return ResultSuccess;
}
void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->GetSessionRequestManager()->SetSessionHandler(shared_from_this());
// Ensure our server session is tracked globally.
kernel.RegisterServerObject(session);
}
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {}
HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
KServerSession* server_session_, KThread* thread_)
: server_session(server_session_), thread(thread_), kernel{kernel_}, memory{memory_} {
@@ -214,7 +200,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
// Padding to align to 16 bytes
rp.AlignWithPadding();
if (Session()->GetSessionRequestManager()->IsDomain() &&
if (GetManager()->IsDomain() &&
((command_header->type == IPC::CommandType::Request ||
command_header->type == IPC::CommandType::RequestWithContext) ||
!incoming)) {
@@ -223,7 +209,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
if (incoming || domain_message_header) {
domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
} else {
if (Session()->GetSessionRequestManager()->IsDomain()) {
if (GetManager()->IsDomain()) {
LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
}
}
@@ -316,12 +302,11 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
if (server_session->GetSessionRequestManager()->IsDomain()) {
if (GetManager()->IsDomain()) {
current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
for (auto& object : outgoing_domain_objects) {
server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object));
cmd_buf[current_offset++] = static_cast<u32_le>(
server_session->GetSessionRequestManager()->DomainHandlerCount());
GetManager()->AppendDomainHandler(std::move(object));
cmd_buf[current_offset++] = static_cast<u32_le>(GetManager()->DomainHandlerCount());
}
}


@@ -45,11 +45,13 @@ class KAutoObject;
class KernelCore;
class KEvent;
class KHandleTable;
class KServerPort;
class KProcess;
class KServerSession;
class KThread;
class KReadableEvent;
class KSession;
class SessionRequestManager;
class ServiceThread;
enum class ThreadWakeupReason;
@@ -76,27 +78,17 @@ public:
virtual Result HandleSyncRequest(Kernel::KServerSession& session,
Kernel::HLERequestContext& context) = 0;
/**
* Signals that a client has just connected to this HLE handler and keeps the
* associated ServerSession alive for the duration of the connection.
* @param server_session Owning pointer to the ServerSession associated with the connection.
*/
void ClientConnected(KServerSession* session);
void AcceptSession(KServerPort* server_port);
void RegisterSession(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager);
/**
* Signals that a client has just disconnected from this HLE handler and releases the
* associated ServerSession.
* @param server_session ServerSession associated with the connection.
*/
void ClientDisconnected(KServerSession* session);
std::weak_ptr<ServiceThread> GetServiceThread() const {
ServiceThread& GetServiceThread() const {
return service_thread;
}
protected:
KernelCore& kernel;
std::weak_ptr<ServiceThread> service_thread;
ServiceThread& service_thread;
};
using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
@@ -162,7 +154,7 @@ public:
session_handler = std::move(handler);
}
std::weak_ptr<ServiceThread> GetServiceThread() const {
ServiceThread& GetServiceThread() const {
return session_handler->GetServiceThread();
}
@@ -170,7 +162,6 @@ public:
Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);
Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context);
private:
bool convert_to_domain{};
@@ -350,11 +341,11 @@ public:
template <typename T>
std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock());
return std::static_pointer_cast<T>(GetManager()->DomainHandler(index).lock());
}
void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
manager = std::move(manager_);
manager = manager_;
}
std::string Description() const;
@@ -363,6 +354,10 @@ public:
return *thread;
}
std::shared_ptr<SessionRequestManager> GetManager() const {
return manager.lock();
}
private:
friend class IPC::ResponseBuilder;
@@ -396,7 +391,7 @@ private:
u32 handles_offset{};
u32 domain_offset{};
std::weak_ptr<SessionRequestManager> manager;
std::weak_ptr<SessionRequestManager> manager{};
KernelCore& kernel;
Core::Memory::Memory& memory;


@@ -10,7 +10,9 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_debug.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_event_info.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_buffer.h"
@@ -22,6 +24,7 @@
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
@@ -44,7 +47,10 @@ namespace Kernel::Init {
HANDLER(KThreadLocalPage, \
(SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
##__VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \
HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)
namespace {
@@ -73,8 +79,20 @@ constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
constexpr size_t SlabCountKIoPool = 1;
constexpr size_t SlabCountKIoRegion = 6;
constexpr size_t SlabcountKSessionRequestMappings = 40;
constexpr size_t SlabCountExtraKThread = 160;
constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread;
namespace test {
static_assert(KernelPageBufferHeapSize ==
2 * PageSize + (SlabCountKProcess + SlabCountKThread +
(SlabCountKProcess + SlabCountKThread) / 8) *
PageSize);
static_assert(KernelPageBufferAdditionalSize ==
(SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize);
} // namespace test
/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
@@ -109,7 +127,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
}
size_t CalculateSlabHeapGapSize() {
constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB;
static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
return KernelSlabHeapGapSize;
}
@@ -134,6 +152,7 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
.num_KDebug = SlabCountKDebug,
.num_KIoPool = SlabCountKIoPool,
.num_KIoRegion = SlabCountKIoRegion,
.num_KSessionRequestMappings = SlabcountKSessionRequestMappings,
};
}
@@ -164,29 +183,6 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
return size;
}
void InitializeKPageBufferSlabHeap(Core::System& system) {
auto& kernel = system.Kernel();
const auto& counts = kernel.SlabResourceCounts();
const size_t num_pages =
counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
const size_t slab_size = num_pages * PageSize;
// Reserve memory from the system resource limit.
ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
const PAddr slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);
// Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
slab_size);
}
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
@@ -258,3 +254,29 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
} // namespace Kernel::Init
namespace Kernel {
void KPageBufferSlabHeap::Initialize(Core::System& system) {
auto& kernel = system.Kernel();
const auto& counts = kernel.SlabResourceCounts();
const size_t num_pages =
counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
const size_t slab_size = num_pages * PageSize;
// Reserve memory from the system resource limit.
ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
const PAddr slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);
// Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
slab_size);
}
} // namespace Kernel


@@ -33,11 +33,11 @@ struct KSlabResourceCounts {
size_t num_KDebug;
size_t num_KIoPool;
size_t num_KIoRegion;
size_t num_KSessionRequestMappings;
};
void InitializeSlabResourceCounts(KernelCore& kernel);
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
void InitializeKPageBufferSlabHeap(Core::System& system);
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);
} // namespace Kernel::Init


@@ -16,6 +16,7 @@
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_transfer_memory.h"
@@ -119,4 +120,6 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject,
// static_assert(std::is_final_v<KCodeMemory> &&
// std::is_base_of_v<KAutoObject, KCodeMemory>);
static_assert(std::is_base_of_v<KAutoObject, KSystemResource>);
} // namespace Kernel


@@ -10,6 +10,8 @@ namespace Kernel {
class KAutoObject;
class KSystemResource;
class KClassTokenGenerator {
public:
using TokenBaseType = u16;
@@ -58,7 +60,7 @@ private:
if constexpr (std::is_same<T, KAutoObject>::value) {
static_assert(T::ObjectType == ObjectType::KAutoObject);
return 0;
} else if constexpr (!std::is_final<T>::value) {
} else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) {
static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
T::ObjectType < ObjectType::BaseClassesEnd);
constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
@@ -108,6 +110,8 @@ public:
KSessionRequest,
KCodeMemory,
KSystemResource,
// NOTE: True order for these has not been determined yet.
KAlpha,
KBeta,


@@ -58,8 +58,7 @@ bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
}
Result KClientPort::CreateSession(KClientSession** out,
std::shared_ptr<SessionRequestManager> session_manager) {
Result KClientPort::CreateSession(KClientSession** out) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
LimitableResource::Sessions);
@@ -104,7 +103,7 @@ Result KClientPort::CreateSession(KClientSession** out,
}
// Initialize the session.
session->Initialize(this, parent->GetName(), session_manager);
session->Initialize(this, parent->GetName());
// Commit the session reservation.
session_reservation.Commit();


@@ -52,8 +52,7 @@ public:
void Destroy() override;
bool IsSignaled() const override;
Result CreateSession(KClientSession** out,
std::shared_ptr<SessionRequestManager> session_manager = nullptr);
Result CreateSession(KClientSession** out);
private:
std::atomic<s32> num_sessions{};


@@ -0,0 +1,20 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
public:
explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
};
} // namespace Kernel


@@ -3,6 +3,8 @@
#pragma once
#include <vector>
#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
@@ -33,28 +35,36 @@ public:
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
Result Initialize(VAddr addr, size_t sz) {
Result Initialize(VAddr memory, size_t size, size_t align) {
// We need to have positive size.
R_UNLESS(sz > 0, ResultOutOfMemory);
m_backing_memory.resize(sz);
R_UNLESS(size > 0, ResultOutOfMemory);
m_backing_memory.resize(size);
// Calculate management overhead.
const size_t management_size =
KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
const size_t allocatable_size = sz - management_size;
// Set addresses.
m_address = memory;
m_aligned_address = Common::AlignDown(memory, align);
// Calculate extents.
const size_t managed_size = m_address + size - m_aligned_address;
const size_t overhead_size = Common::AlignUp(
KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)),
sizeof(PageBuffer));
R_UNLESS(overhead_size < size, ResultOutOfMemory);
// Set tracking fields.
m_address = addr;
m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
m_count = allocatable_size / sizeof(PageBuffer);
R_UNLESS(m_count > 0, ResultOutOfMemory);
m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer));
m_count = m_size / sizeof(PageBuffer);
// Clear the management region.
u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
std::memset(management_ptr, 0, management_size);
u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size);
std::memset(management_ptr, 0, overhead_size);
// Initialize the bitmap.
m_page_bitmap.Initialize(management_ptr, m_count);
const size_t allocatable_region_size =
(m_address + size - overhead_size) - m_aligned_address;
ASSERT(allocatable_region_size >= sizeof(PageBuffer));
m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));
// Free the pages to the bitmap.
for (size_t i = 0; i < m_count; i++) {
@@ -62,7 +72,8 @@ public:
std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
// Set the bit for the free page.
m_page_bitmap.SetBit(i);
m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) /
sizeof(PageBuffer));
}
R_SUCCEED();
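// Worked example of the new alignment logic above (illustrative numbers only,
// assuming sizeof(PageBuffer) == PageSize == 0x1000):
//   Initialize(memory = 0x80301000, size = 0x40000, align = 0x100000)
//     m_aligned_address = AlignDown(0x80301000, 0x100000) = 0x80300000
//     managed_size      = 0x80301000 + 0x40000 - 0x80300000 = 0x41000
//     first page's bitmap bit = (0x80301000 - 0x80300000) / 0x1000 = 1
// i.e. bitmap indices are now relative to the aligned base, not m_address.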
@@ -101,7 +112,28 @@ public:
m_page_bitmap.ClearBit(offset);
m_peak = std::max(m_peak, (++m_used));
return GetPointer<PageBuffer>(m_address) + offset;
return GetPointer<PageBuffer>(m_aligned_address) + offset;
}
PageBuffer* Allocate(size_t count) {
// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);
// Find a random free block.
s64 soffset = m_page_bitmap.FindFreeRange(count);
if (soffset < 0) [[likely]] {
return nullptr;
}
const size_t offset = static_cast<size_t>(soffset);
// Update our tracking.
m_page_bitmap.ClearRange(offset, count);
m_used += count;
m_peak = std::max(m_peak, m_used);
return GetPointer<PageBuffer>(m_aligned_address) + offset;
}
void Free(PageBuffer* pb) {
@@ -113,7 +145,7 @@ public:
KScopedSpinLock lk(m_lock);
// Set the bit for the free page.
size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer);
size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);
// Decrement our used count.
@@ -127,6 +159,7 @@ private:
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
VAddr m_aligned_address{};
size_t m_size{};
// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.


@@ -6,6 +6,7 @@
#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_group.h"
namespace Kernel {
@@ -51,8 +52,10 @@ private:
DynamicSlabType* m_slab_heap{};
};
class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {};
class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};
using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType;
using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
} // namespace Kernel


@@ -0,0 +1,64 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <boost/intrusive/list.hpp>
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
namespace Kernel {
class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> {
public:
struct InfoCreateThread {
u32 thread_id{};
uintptr_t tls_address{};
};
struct InfoExitProcess {
Svc::ProcessExitReason reason{};
};
struct InfoExitThread {
Svc::ThreadExitReason reason{};
};
struct InfoException {
Svc::DebugException exception_type{};
s32 exception_data_count{};
uintptr_t exception_address{};
std::array<uintptr_t, 4> exception_data{};
};
struct InfoSystemCall {
s64 tick{};
s32 id{};
};
public:
KEventInfo() = default;
~KEventInfo() = default;
public:
Svc::DebugEvent event{};
u32 thread_id{};
u32 flags{};
bool is_attached{};
bool continue_flag{};
bool ignore_continue{};
bool close_once{};
union {
InfoCreateThread create_thread;
InfoExitProcess exit_process;
InfoExitThread exit_thread;
InfoException exception;
InfoSystemCall system_call;
} info{};
KThread* debug_thread{};
};
} // namespace Kernel


@@ -5,14 +5,11 @@
namespace Kernel {
KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;
Result KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
std::swap(m_table_size, saved_table_size);
@@ -25,28 +22,28 @@ Result KHandleTable::Finalize() {
}
}
return ResultSuccess;
R_SUCCEED();
}
bool KHandleTable::Remove(Handle handle) {
// Don't allow removal of a pseudo-handle.
if (Svc::IsPseudoHandle(handle)) {
if (Svc::IsPseudoHandle(handle)) [[unlikely]] {
return false;
}
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) {
if (handle_pack.reserved != 0) [[unlikely]] {
return false;
}
// Find the object and free the entry.
KAutoObject* obj = nullptr;
{
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
if (this->IsValidHandle(handle)) {
if (this->IsValidHandle(handle)) [[likely]] {
const auto index = handle_pack.index;
obj = m_objects[index];
@@ -57,13 +54,13 @@ bool KHandleTable::Remove(Handle handle) {
}
// Close the object.
kernel.UnregisterInUseObject(obj);
m_kernel.UnregisterInUseObject(obj);
obj->Close();
return true;
}
Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
@@ -82,22 +79,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
*out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
}
return ResultSuccess;
R_SUCCEED();
}
Result KHandleTable::Reserve(Handle* out_handle) {
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
R_UNLESS(m_count < m_table_size, ResultOutOfHandles);
*out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId());
return ResultSuccess;
R_SUCCEED();
}
void KHandleTable::Unreserve(Handle handle) {
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Unpack the handle.
@@ -108,7 +105,7 @@ void KHandleTable::Unreserve(Handle handle) {
ASSERT(reserved == 0);
ASSERT(linear_id != 0);
if (index < m_table_size) {
if (index < m_table_size) [[likely]] {
// NOTE: This code does not check the linear id.
ASSERT(m_objects[index] == nullptr);
this->FreeEntry(index);
@@ -116,7 +113,7 @@ void KHandleTable::Unreserve(Handle handle) {
}
void KHandleTable::Register(Handle handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Unpack the handle.
@@ -127,7 +124,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) {
ASSERT(reserved == 0);
ASSERT(linear_id != 0);
if (index < m_table_size) {
if (index < m_table_size) [[likely]] {
// Set the entry.
ASSERT(m_objects[index] == nullptr);


@@ -21,33 +21,38 @@ namespace Kernel {
class KernelCore;
class KHandleTable {
public:
YUZU_NON_COPYABLE(KHandleTable);
YUZU_NON_MOVEABLE(KHandleTable);
public:
static constexpr size_t MaxTableSize = 1024;
explicit KHandleTable(KernelCore& kernel_);
~KHandleTable();
public:
explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {}
Result Initialize(s32 size) {
// Check that the table size is valid.
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
// Lock.
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Initialize all fields.
m_max_count = 0;
m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size);
m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;
// Free all entries.
for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) {
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = i - 1;
m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
m_free_head_index = i;
}
return ResultSuccess;
R_SUCCEED();
}
size_t GetTableSize() const {
@@ -66,13 +71,13 @@ public:
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
if constexpr (std::is_same_v<T, KAutoObject>) {
return this->GetObjectImpl(handle);
} else {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
return obj->DynamicCast<T*>();
} else {
return nullptr;
@@ -85,13 +90,13 @@ public:
// Handle pseudo-handles.
if constexpr (std::derived_from<KProcess, T>) {
if (handle == Svc::PseudoHandle::CurrentProcess) {
auto* const cur_process = kernel.CurrentProcess();
auto* const cur_process = m_kernel.CurrentProcess();
ASSERT(cur_process != nullptr);
return cur_process;
}
} else if constexpr (std::derived_from<KThread, T>) {
if (handle == Svc::PseudoHandle::CurrentThread) {
auto* const cur_thread = GetCurrentThreadPointer(kernel);
auto* const cur_thread = GetCurrentThreadPointer(m_kernel);
ASSERT(cur_thread != nullptr);
return cur_thread;
}
@@ -100,6 +105,37 @@ public:
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
return this->GetObjectImpl(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const {
// Handle pseudo-handles.
ASSERT(cur_thread != nullptr);
if (handle == Svc::PseudoHandle::CurrentProcess) {
auto* const cur_process =
static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess()));
ASSERT(cur_process != nullptr);
return cur_process;
}
if (handle == Svc::PseudoHandle::CurrentThread) {
return static_cast<KAutoObject*>(cur_thread);
}
return GetObjectForIpcWithoutPseudoHandle(handle);
}
KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const {
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
return this->GetObjectByIndexImpl(out_handle, index);
}
Result Reserve(Handle* out_handle);
void Unreserve(Handle handle);
@@ -112,7 +148,7 @@ public:
size_t num_opened;
{
// Lock the table.
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
for (num_opened = 0; num_opened < num_handles; num_opened++) {
// Get the current handle.
@@ -120,13 +156,13 @@ public:
// Get the object for the current handle.
KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
if (cur_object == nullptr) {
if (cur_object == nullptr) [[unlikely]] {
break;
}
// Cast the current object to the desired type.
T* cur_t = cur_object->DynamicCast<T*>();
if (cur_t == nullptr) {
if (cur_t == nullptr) [[unlikely]] {
break;
}
@@ -137,7 +173,7 @@ public:
}
// If we converted every object, succeed.
if (num_opened == num_handles) {
if (num_opened == num_handles) [[likely]] {
return true;
}
@@ -191,21 +227,21 @@ private:
ASSERT(reserved == 0);
// Validate our indexing information.
if (raw_value == 0) {
if (raw_value == 0) [[unlikely]] {
return false;
}
if (linear_id == 0) {
if (linear_id == 0) [[unlikely]] {
return false;
}
if (index >= m_table_size) {
if (index >= m_table_size) [[unlikely]] {
return false;
}
// Check that there's an object, and our serial id is correct.
if (m_objects[index] == nullptr) {
if (m_objects[index] == nullptr) [[unlikely]] {
return false;
}
if (m_entry_infos[index].GetLinearId() != linear_id) {
if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
return false;
}
@@ -215,11 +251,11 @@ private:
KAutoObject* GetObjectImpl(Handle handle) const {
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) {
if (handle_pack.reserved != 0) [[unlikely]] {
return nullptr;
}
if (this->IsValidHandle(handle)) {
if (this->IsValidHandle(handle)) [[likely]] {
return m_objects[handle_pack.index];
} else {
return nullptr;
@@ -227,9 +263,8 @@ private:
}
KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const {
// Index must be in bounds.
if (index >= m_table_size) {
if (index >= m_table_size) [[unlikely]] {
return nullptr;
}
@@ -244,18 +279,15 @@ private:
private:
union HandlePack {
HandlePack() = default;
HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
constexpr HandlePack() = default;
constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
u32 raw;
u32 raw{};
BitField<0, 15, u32> index;
BitField<15, 15, u32> linear_id;
BitField<30, 2, u32> reserved;
};
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;
static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
HandlePack handle{};
handle.index.Assign(index);
@@ -264,6 +296,10 @@ private:
return handle.raw;
}
private:
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;
union EntryInfo {
u16 linear_id;
s16 next_free_index;
@@ -271,21 +307,21 @@ private:
constexpr u16 GetLinearId() const {
return linear_id;
}
constexpr s16 GetNextFreeIndex() const {
constexpr s32 GetNextFreeIndex() const {
return next_free_index;
}
};
private:
KernelCore& m_kernel;
std::array<EntryInfo, MaxTableSize> m_entry_infos{};
std::array<KAutoObject*, MaxTableSize> m_objects{};
s32 m_free_head_index{-1};
mutable KSpinLock m_lock;
s32 m_free_head_index{};
u16 m_table_size{};
u16 m_max_count{};
u16 m_next_linear_id{MinLinearId};
u16 m_next_linear_id{};
u16 m_count{};
mutable KSpinLock m_lock;
KernelCore& kernel;
};
} // namespace Kernel
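As a worked example of the handle layout above (bits 0-14 index, bits 15-29 linear id, bits 30-31 reserved), encoding index 5 with linear id 3 gives:

// EncodeHandle(5, 3): index in bits 0-14, linear id in bits 15-29.
static_assert((5u | (3u << 15)) == 0x18005);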


@@ -35,26 +35,32 @@ enum class KMemoryState : u32 {
FlagCanMapProcess = (1 << 23),
FlagCanChangeAttribute = (1 << 24),
FlagCanCodeMemory = (1 << 25),
FlagLinearMapped = (1 << 26),
FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
FlagReferenceCounted | FlagCanChangeAttribute,
FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped,
FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagReferenceCounted,
FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped,
FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,
FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap |
FlagLinearMapped,
Free = static_cast<u32>(Svc::MemoryState::Free),
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,
// Alias was removed after 1.0.0.
AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
FlagCanCodeAlias,
@@ -67,18 +73,18 @@ enum class KMemoryState : u32 {
Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
ThreadLocal =
static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc |
FlagCanUseNonDeviceIpc,
Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),
@@ -91,69 +97,69 @@ enum class KMemoryState : u32 {
Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
FlagReferenceCounted | FlagCanDebug,
CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,
Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped,
Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
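// Observation on the updated constants above (an inference from the diff, not
// part of the patch): most new values differ from the old ones by exactly
// FlagLinearMapped = 1 << 26 = 0x04000000, e.g.:
static_assert((0x00402006 | (1u << 26)) == 0x04402006); // Shared
static_assert((0x00DC7E03 | (1u << 26)) == 0x04DC7E03); // Code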
enum class KMemoryPermission : u8 {
None = 0,
All = static_cast<u8>(~None),
Read = 1 << 0,
Write = 1 << 1,
Execute = 1 << 2,
ReadAndWrite = Read | Write,
ReadAndExecute = Read | Execute,
UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),
KernelShift = 3,
KernelRead = Read << KernelShift,
KernelWrite = Write << KernelShift,
KernelExecute = Execute << KernelShift,
KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << KernelShift,
KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift,
KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift,
NotMapped = (1 << (2 * KernelShift)),
KernelReadWrite = KernelRead | KernelWrite,
KernelReadExecute = KernelRead | KernelExecute,
UserRead = Read | KernelRead,
UserWrite = Write | KernelWrite,
UserExecute = Execute,
UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead,
UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite,
UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute),
UserReadWrite = UserRead | UserWrite,
UserReadExecute = UserRead | UserExecute,
IpcLockChangeMask = NotMapped | UserReadWrite
UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),
IpcLockChangeMask = NotMapped | UserReadWrite,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
@@ -468,6 +474,7 @@ public:
constexpr void UpdateDeviceDisableMergeStateForShareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
// New permission/right aren't used.
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
@@ -478,6 +485,7 @@ public:
constexpr void UpdateDeviceDisableMergeStateForShareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
// New permission/left aren't used.
if (right) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
@@ -494,6 +502,8 @@ public:
constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// New permission isn't used.
// We must either be shared or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
m_device_use_count == 0);
@@ -509,6 +519,7 @@ public:
constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
// New permission/right aren't used.
if (left) {
if (!m_device_disable_merge_left_count) {
@@ -528,6 +539,8 @@ public:
constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
// New permission/left aren't used.
if (right) {
const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
ASSERT(old_device_disable_merge_right_count > 0);
@@ -546,6 +559,8 @@ public:
constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// New permission isn't used.
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
@@ -563,6 +578,7 @@ public:
constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// New permission isn't used.
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
@@ -613,6 +629,8 @@ public:
constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
[[maybe_unused]] bool right) {
// New permission isn't used.
// We must be locked.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);


@@ -153,13 +153,9 @@ void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_
}
}
size_t KMemoryLayout::GetResourceRegionSizeForInit() {
// Calculate resource region size based on whether we allow extra threads.
const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
size_t resource_region_size =
KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);
return resource_region_size;
size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) {
return KernelResourceSize + KSystemControl::SecureAppletMemorySize +
(use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0);
}
} // namespace Kernel


@@ -60,10 +60,12 @@ constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;
constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;
// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000;
constexpr size_t KernelPageBufferHeapSize = 0x3E0000;
constexpr size_t KernelSlabHeapAdditionalSize = 0x148000;
constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
constexpr std::size_t KernelResourceSize =
KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
KernelSlabHeapSize + KernelPageBufferHeapSize;
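For a rough feel of the arithmetic, here is a standalone sketch of the new GetResourceRegionSizeForInit composition. Only the three sizes visible in this hunk are real; every constant marked "placeholder" is an assumption for illustration:

#include <cstddef>

constexpr std::size_t KernelPageTableHeapSize = 0x200000;  // placeholder
constexpr std::size_t KernelInitialPageHeapSize = 0x40000; // placeholder
constexpr std::size_t KernelSlabHeapSize = 0x2F0000;       // placeholder
constexpr std::size_t KernelPageBufferHeapSize = 0x3E0000;
constexpr std::size_t KernelSlabHeapAdditionalSize = 0x148000;
constexpr std::size_t KernelPageBufferAdditionalSize = 0x33C000;
constexpr std::size_t SecureAppletMemorySize = 0x400000;   // placeholder

constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize +
    KernelInitialPageHeapSize + KernelSlabHeapSize + KernelPageBufferHeapSize;

constexpr std::size_t GetResourceRegionSizeForInit(bool use_extra_resource) {
    // Secure applet memory is now always reserved; the slab and page-buffer
    // additions only apply when extra resources are requested.
    return KernelResourceSize + SecureAppletMemorySize +
           (use_extra_resource
                ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize
                : 0);
}

static_assert(GetResourceRegionSizeForInit(true) -
              GetResourceRegionSizeForInit(false) == 0x484000);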
constexpr bool IsKernelAddressKey(VAddr key) {
return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
@@ -168,6 +170,11 @@ public:
KMemoryRegionType_VirtualDramKernelTraceBuffer));
}
const KMemoryRegion& GetSecureAppletMemoryRegion() {
return Dereference(GetVirtualMemoryRegionTree().FindByType(
KMemoryRegionType_VirtualDramKernelSecureAppletMemory));
}
const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
return Dereference(FindVirtualLinear(address));
}
@@ -229,7 +236,7 @@ public:
void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
VAddr linear_virtual_start);
static size_t GetResourceRegionSizeForInit();
static size_t GetResourceRegionSizeForInit(bool use_extra_resource);
auto GetKernelRegionExtents() const {
return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel);
@@ -279,6 +286,10 @@ public:
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelSlab);
}
auto GetKernelSecureAppletMemoryRegionPhysicalExtents() {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelSecureAppletMemory);
}
auto GetKernelPageTableHeapRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelPtHeap);


@@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
} else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
return KMemoryManager::Pool::SystemNonSecure;
} else {
ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
return {};
UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
}
}
} // namespace
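The `(type | pool_type) == type` comparisons above are a subset test: they hold exactly when every bit of the pool's type value is already present in the region's type. A standalone sketch, with the raw values taken from the static_asserts later in this diff (attribute bits dropped for brevity):

#include <cstdint>

constexpr bool IsDerivedFrom(uint32_t type, uint32_t base) {
    return (type | base) == type; // true iff all bits of base are set in type
}

constexpr uint32_t DramUserPool = 0x1A6;
constexpr uint32_t DramApplicationPool = 0x7A6;

static_assert(IsDerivedFrom(DramApplicationPool, DramUserPool));
static_assert(!IsDerivedFrom(DramUserPool, DramApplicationPool));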
KMemoryManager::KMemoryManager(Core::System& system_)
: system{system_}, pool_locks{
KLightLock{system_.Kernel()},
KLightLock{system_.Kernel()},
KLightLock{system_.Kernel()},
KLightLock{system_.Kernel()},
} {}
KMemoryManager::KMemoryManager(Core::System& system)
: m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()},
m_pool_locks{
KLightLock{system.Kernel()},
KLightLock{system.Kernel()},
KLightLock{system.Kernel()},
KLightLock{system.Kernel()},
} {}
void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
// Clear the management region to zero.
const VAddr management_region_end = management_region + management_region_size;
// std::memset(GetVoidPointer(management_region), 0, management_region_size);
// Reset our manager count.
num_managers = 0;
m_num_managers = 0;
// Traverse the virtual memory layout tree, initializing each manager as appropriate.
while (num_managers != MaxManagerCount) {
while (m_num_managers != MaxManagerCount) {
// Locate the region that should initialize the current manager.
PAddr region_address = 0;
size_t region_size = 0;
Pool region_pool = Pool::Count;
for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
// We only care about regions that we need to create managers for.
if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
continue;
}
// We want to initialize the managers in order.
if (it.GetAttributes() != num_managers) {
if (it.GetAttributes() != m_num_managers) {
continue;
}
@@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
}
// Initialize a new manager for the region.
Impl* manager = std::addressof(managers[num_managers++]);
ASSERT(num_managers <= managers.size());
Impl* manager = std::addressof(m_managers[m_num_managers++]);
ASSERT(m_num_managers <= m_managers.size());
const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
management_region_end, region_pool);
@@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
// Insert the manager into the pool list.
const auto region_pool_index = static_cast<u32>(region_pool);
if (pool_managers_tail[region_pool_index] == nullptr) {
pool_managers_head[region_pool_index] = manager;
if (m_pool_managers_tail[region_pool_index] == nullptr) {
m_pool_managers_head[region_pool_index] = manager;
} else {
pool_managers_tail[region_pool_index]->SetNext(manager);
manager->SetPrev(pool_managers_tail[region_pool_index]);
m_pool_managers_tail[region_pool_index]->SetNext(manager);
manager->SetPrev(m_pool_managers_tail[region_pool_index]);
}
pool_managers_tail[region_pool_index] = manager;
m_pool_managers_tail[region_pool_index] = manager;
}
// Free each region to its corresponding heap.
@@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
const PAddr ini_last = ini_end - 1;
for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
// Get the manager for the region.
auto index = it.GetAttributes();
auto& manager = managers[index];
auto& manager = m_managers[it.GetAttributes()];
const PAddr cur_start = it.GetAddress();
const PAddr cur_last = it.GetLastAddress();
@@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
}
// Update the used size for all managers.
for (size_t i = 0; i < num_managers; ++i) {
managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
for (size_t i = 0; i < m_num_managers; ++i) {
m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
}
}
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
UNREACHABLE();
}
void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
UNREACHABLE();
}
PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
// Early return if we're allocating no pages.
if (num_pages == 0) {
@@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
// Lock the pool that we're allocating from.
const auto [pool, dir] = DecodeOption(option);
KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);
KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]);
// Choose a heap based on our page size request.
const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
@@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
PAddr allocated_block = 0;
for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
chosen_manager = this->GetNextManager(chosen_manager, dir)) {
allocated_block = chosen_manager->AllocateBlock(heap_index, true);
allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
if (allocated_block != 0) {
break;
}
@@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
return 0;
}
// If we allocated more than we need, free some.
const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
if (allocated_pages > num_pages) {
chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
// Maintain the optimized memory bitmap, if we should.
if (m_has_optimized_process[static_cast<size_t>(pool)]) {
UNIMPLEMENTED();
}
// Open the first reference to the pages.
@@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
}
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
Direction dir, bool random) {
Direction dir, bool unoptimized, bool random) {
// Choose a heap based on our page size request.
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, ResultOutOfMemory);
// Ensure that we don't leave anything un-freed.
auto group_guard = SCOPE_GUARD({
ON_RESULT_FAILURE {
for (const auto& it : out->Nodes()) {
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
const size_t num_pages_to_free =
auto& manager = this->GetManager(it.GetAddress());
const size_t node_num_pages =
std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
manager.Free(it.GetAddress(), num_pages_to_free);
manager.Free(it.GetAddress(), node_num_pages);
}
});
out->Finalize();
};
// Keep allocating until we've allocated all our pages.
for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
@@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
break;
}
// Safely add it to our group.
{
auto block_guard =
SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
block_guard.Cancel();
// Ensure we don't leak the block if we fail.
ON_RESULT_FAILURE_2 {
cur_manager->Free(allocated_block, pages_per_alloc);
};
// Add the block to our group.
R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
// Maintain the optimized memory bitmap, if we should.
if (unoptimized) {
UNIMPLEMENTED();
}
num_pages -= pages_per_alloc;
@@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
R_UNLESS(num_pages == 0, ResultOutOfMemory);
// We succeeded!
group_guard.Cancel();
return ResultSuccess;
R_SUCCEED();
}
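The ON_RESULT_FAILURE block above replaces the cancellable SCOPE_GUARD, but both implement the same rollback-unless-committed pattern. A minimal standalone guard in that spirit, assuming nothing about yuzu's macros:

#include <functional>
#include <utility>

// Runs the stored rollback on scope exit unless Cancel() was called first.
class ScopeGuard {
public:
    explicit ScopeGuard(std::function<void()> f) : m_f(std::move(f)) {}
    ~ScopeGuard() {
        if (m_active) {
            m_f();
        }
    }
    void Cancel() {
        m_active = false;
    }

private:
    std::function<void()> m_f;
    bool m_active = true;
};

bool AllocateMany(int count) {
    int allocated = 0;
    ScopeGuard rollback([&] { /* free the first `allocated` blocks here */ });
    for (int i = 0; i < count; ++i) {
        // If an allocation fails, returning early here triggers the rollback.
        ++allocated;
    }
    rollback.Cancel(); // success: keep everything
    return true;
}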
Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
@@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
// Lock the pool that we're allocating from.
const auto [pool, dir] = DecodeOption(option);
KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
// Allocate the page group.
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir,
m_has_optimized_process[static_cast<size_t>(pool)], true));
// Open the first reference to the pages.
for (const auto& block : out->Nodes()) {
@@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
size_t remaining_pages = block.GetNumPages();
while (remaining_pages > 0) {
// Get the manager for the current address.
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
auto& manager = this->GetManager(cur_address);
// Process part or all of the block.
const size_t cur_pages =
@@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
}
}
return ResultSuccess;
R_SUCCEED();
}
Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
u64 process_id, u8 fill_pattern) {
Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option,
u64 process_id, u8 fill_pattern) {
ASSERT(out != nullptr);
ASSERT(out->GetNumPages() == 0);
@@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
const auto [pool, dir] = DecodeOption(option);
// Allocate the memory.
bool optimized;
{
// Lock the pool that we're allocating from.
KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
// Check if we have an optimized process.
const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)];
const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id;
// Allocate the page group.
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized,
false));
// Open the first reference to the pages.
// Set whether we should optimize.
optimized = has_optimized && is_optimized;
}
// Perform optimized memory tracking, if we should.
if (optimized) {
// Iterate over the allocated blocks.
for (const auto& block : out->Nodes()) {
PAddr cur_address = block.GetAddress();
size_t remaining_pages = block.GetNumPages();
while (remaining_pages > 0) {
// Get the manager for the current address.
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
// Get the block extents.
const PAddr block_address = block.GetAddress();
const size_t block_pages = block.GetNumPages();
// Process part or all of the block.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
manager.OpenFirst(cur_address, cur_pages);
// If it has no pages, we don't need to do anything.
if (block_pages == 0) {
continue;
}
// Advance.
cur_address += cur_pages * PageSize;
remaining_pages -= cur_pages;
// Fill all the pages that we need to fill.
bool any_new = false;
{
PAddr cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
// Get the manager for the current address.
auto& manager = this->GetManager(cur_address);
// Process part or all of the block.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
any_new =
manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
// Advance.
cur_address += cur_pages * PageSize;
remaining_pages -= cur_pages;
}
}
// If there are new pages, update tracking for the allocation.
if (any_new) {
// Update tracking for the allocation.
PAddr cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
// Get the manager for the current address.
auto& manager = this->GetManager(cur_address);
// Lock the pool for the manager.
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
// Track some or all of the current pages.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
manager.TrackOptimizedAllocation(cur_address, cur_pages);
// Advance.
cur_address += cur_pages * PageSize;
remaining_pages -= cur_pages;
}
}
}
}
// Set all the allocated memory.
for (const auto& block : out->Nodes()) {
std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
block.GetSize());
}
return ResultSuccess;
}
void KMemoryManager::Open(PAddr address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
manager.Open(address, cur_pages);
} else {
// Set all the allocated memory.
for (const auto& block : out->Nodes()) {
std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
block.GetSize());
}
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
void KMemoryManager::Close(PAddr address, size_t num_pages) {
// Repeatedly close references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
manager.Close(address, cur_pages);
}
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
void KMemoryManager::Close(const KPageGroup& pg) {
for (const auto& node : pg.Nodes()) {
Close(node.GetAddress(), node.GetNumPages());
}
}
void KMemoryManager::Open(const KPageGroup& pg) {
for (const auto& node : pg.Nodes()) {
Open(node.GetAddress(), node.GetNumPages());
}
R_SUCCEED();
}
size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
@@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
ASSERT(Common::IsAligned(total_management_size, PageSize));
// Setup region.
pool = p;
management_region = management;
page_reference_counts.resize(
m_pool = p;
m_management_region = management;
m_page_reference_counts.resize(
Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
ASSERT(Common::IsAligned(management_region, PageSize));
ASSERT(Common::IsAligned(m_management_region, PageSize));
// Initialize the manager's KPageHeap.
heap.Initialize(address, size, management + manager_size, page_heap_size);
m_heap.Initialize(address, size, management + manager_size, page_heap_size);
return total_management_size;
}
void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
UNREACHABLE();
}
void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
UNREACHABLE();
}
bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
u8 fill_pattern) {
UNREACHABLE();
}
size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
const size_t optimize_map_size =


@@ -21,11 +21,8 @@ namespace Kernel {
class KPageGroup;
class KMemoryManager final {
class KMemoryManager {
public:
YUZU_NON_COPYABLE(KMemoryManager);
YUZU_NON_MOVEABLE(KMemoryManager);
enum class Pool : u32 {
Application = 0,
Applet = 1,
@@ -45,16 +42,85 @@ public:
enum class Direction : u32 {
FromFront = 0,
FromBack = 1,
Shift = 0,
Mask = (0xF << Shift),
};
explicit KMemoryManager(Core::System& system_);
static constexpr size_t MaxManagerCount = 10;
explicit KMemoryManager(Core::System& system);
void Initialize(VAddr management_region, size_t management_region_size);
constexpr size_t GetSize(Pool pool) const {
Result InitializeOptimizedMemory(u64 process_id, Pool pool);
void FinalizeOptimizedMemory(u64 process_id, Pool pool);
PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
u8 fill_pattern);
Pool GetPool(PAddr address) const {
return this->GetManager(address).GetPool();
}
void Open(PAddr address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
manager.Open(address, cur_pages);
}
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
void OpenFirst(PAddr address, size_t num_pages) {
// Repeatedly open references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
manager.OpenFirst(address, cur_pages);
}
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
void Close(PAddr address, size_t num_pages) {
// Repeatedly close references until we've done so for all pages.
while (num_pages) {
auto& manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
manager.Close(address, cur_pages);
}
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
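Open, OpenFirst, and Close above all share the same chunked walk: a physical range can span several managers, so each iteration handles only the pages up to the current manager's end before re-resolving the manager. A runnable toy version of the walk, with simplified stand-in types:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;

// Two toy managers covering [0, 1 MiB) and [1 MiB, 2 MiB).
struct Manager {
    std::uint64_t start;
    std::uint64_t end;
    std::size_t GetPageOffsetToEnd(std::uint64_t addr) const {
        return static_cast<std::size_t>((end - addr) / PageSize);
    }
};
std::array<Manager, 2> managers{{{0x0, 0x100000}, {0x100000, 0x200000}}};

Manager& GetManager(std::uint64_t addr) {
    return managers[addr / 0x100000];
}

void Open(std::uint64_t address, std::size_t num_pages) {
    while (num_pages) {
        Manager& manager = GetManager(address);
        const std::size_t cur_pages =
            std::min(num_pages, manager.GetPageOffsetToEnd(address));
        // The real code takes the manager's pool KLightLock here, then bumps
        // the per-page reference counts for cur_pages pages.
        std::printf("open %zu pages at %#llx\n", cur_pages,
                    static_cast<unsigned long long>(address));
        num_pages -= cur_pages;
        address += cur_pages * PageSize;
    }
}

int main() {
    Open(0xFE000, 4); // crosses the manager boundary: handled as 2 + 2 pages
}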
size_t GetSize() {
size_t total = 0;
for (size_t i = 0; i < m_num_managers; i++) {
total += m_managers[i].GetSize();
}
return total;
}
size_t GetSize(Pool pool) {
constexpr Direction GetSizeDirection = Direction::FromFront;
size_t total = 0;
for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
@@ -64,18 +130,36 @@ public:
return total;
}
PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
u8 fill_pattern);
size_t GetFreeSize() {
size_t total = 0;
for (size_t i = 0; i < m_num_managers; i++) {
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]);
total += m_managers[i].GetFreeSize();
}
return total;
}
static constexpr size_t MaxManagerCount = 10;
size_t GetFreeSize(Pool pool) {
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
void Close(PAddr address, size_t num_pages);
void Close(const KPageGroup& pg);
constexpr Direction GetSizeDirection = Direction::FromFront;
size_t total = 0;
for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
manager = this->GetNextManager(manager, GetSizeDirection)) {
total += manager->GetFreeSize();
}
return total;
}
void Open(PAddr address, size_t num_pages);
void Open(const KPageGroup& pg);
void DumpFreeList(Pool pool) {
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
constexpr Direction DumpDirection = Direction::FromFront;
for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr;
manager = this->GetNextManager(manager, DumpDirection)) {
manager->DumpFreeList();
}
}
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -88,14 +172,13 @@ public:
}
static constexpr Pool GetPool(u32 option) {
return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >>
static_cast<u32>(Pool::Shift));
}
static constexpr Direction GetDirection(u32 option) {
return static_cast<Direction>(
(static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
static_cast<u32>(Direction::Shift));
return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >>
static_cast<u32>(Direction::Shift));
}
static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {
@@ -103,74 +186,88 @@ public:
}
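GetPool and GetDirection above are plain mask-and-shift decoders over the allocation option word. A standalone sketch of DecodeOption; Direction's Shift/Mask appear verbatim earlier in this diff, while Pool's shift value of 4 is an assumption for illustration:

#include <cstdint>
#include <tuple>

enum class Pool : std::uint32_t {
    Application = 0,
    Applet = 1,
    Shift = 4,       // assumed for illustration
    Mask = 0xFu << 4,
};

enum class Direction : std::uint32_t {
    FromFront = 0,
    FromBack = 1,
    Shift = 0,
    Mask = 0xFu << 0,
};

constexpr std::tuple<Pool, Direction> DecodeOption(std::uint32_t option) {
    const auto pool = static_cast<Pool>((option & static_cast<std::uint32_t>(Pool::Mask)) >>
                                        static_cast<std::uint32_t>(Pool::Shift));
    const auto dir =
        static_cast<Direction>((option & static_cast<std::uint32_t>(Direction::Mask)) >>
                               static_cast<std::uint32_t>(Direction::Shift));
    return {pool, dir};
}

static_assert(std::get<0>(DecodeOption(0x11)) == Pool::Applet);
static_assert(std::get<1>(DecodeOption(0x11)) == Direction::FromBack);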
private:
class Impl final {
class Impl {
public:
YUZU_NON_COPYABLE(Impl);
YUZU_NON_MOVEABLE(Impl);
static size_t CalculateManagementOverheadSize(size_t region_size);
static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
Common::BitSize<u64>()) *
sizeof(u64);
}
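A worked instance of the overhead formula above, which reserves one bit per page padded to whole u64 words ((pages + 63) / 64 is the same rounding as AlignUp(pages, BitSize<u64>()) / BitSize<u64>()):

#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

constexpr std::size_t OptimizedOverhead(std::size_t region_size) {
    const std::size_t num_pages = region_size / PageSize;
    return ((num_pages + 63) / 64) * sizeof(std::uint64_t);
}

// 4 MiB => 1024 pages => ceil(1024 / 64) * 8 = 128 bytes of bitmap.
static_assert(OptimizedOverhead(4 * 1024 * 1024) == 128);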
public:
Impl() = default;
~Impl() = default;
size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
Pool p);
VAddr AllocateBlock(s32 index, bool random) {
return heap.AllocateBlock(index, random);
PAddr AllocateBlock(s32 index, bool random) {
return m_heap.AllocateBlock(index, random);
}
void Free(VAddr addr, size_t num_pages) {
heap.Free(addr, num_pages);
PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
return m_heap.AllocateAligned(index, num_pages, align_pages);
}
void Free(PAddr addr, size_t num_pages) {
m_heap.Free(addr, num_pages);
}
void SetInitialUsedHeapSize(size_t reserved_size) {
heap.SetInitialUsedSize(reserved_size);
m_heap.SetInitialUsedSize(reserved_size);
}
void InitializeOptimizedMemory() {
UNIMPLEMENTED();
}
void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
void TrackOptimizedAllocation(PAddr block, size_t num_pages);
bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const {
return pool;
return m_pool;
}
constexpr size_t GetSize() const {
return heap.GetSize();
return m_heap.GetSize();
}
constexpr PAddr GetEndAddress() const {
return m_heap.GetEndAddress();
}
constexpr VAddr GetAddress() const {
return heap.GetAddress();
size_t GetFreeSize() const {
return m_heap.GetFreeSize();
}
constexpr VAddr GetEndAddress() const {
return heap.GetEndAddress();
void DumpFreeList() const {
UNIMPLEMENTED();
}
constexpr size_t GetPageOffset(PAddr address) const {
return heap.GetPageOffset(address);
return m_heap.GetPageOffset(address);
}
constexpr size_t GetPageOffsetToEnd(PAddr address) const {
return heap.GetPageOffsetToEnd(address);
return m_heap.GetPageOffsetToEnd(address);
}
constexpr void SetNext(Impl* n) {
next = n;
m_next = n;
}
constexpr void SetPrev(Impl* n) {
prev = n;
m_prev = n;
}
constexpr Impl* GetNext() const {
return next;
return m_next;
}
constexpr Impl* GetPrev() const {
return prev;
return m_prev;
}
void OpenFirst(PAddr address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
const RefCount ref_count = (++page_reference_counts[index]);
const RefCount ref_count = (++m_page_reference_counts[index]);
ASSERT(ref_count == 1);
index++;
@@ -181,7 +278,7 @@ private:
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
const RefCount ref_count = (++page_reference_counts[index]);
const RefCount ref_count = (++m_page_reference_counts[index]);
ASSERT(ref_count > 1);
index++;
@@ -195,8 +292,8 @@ private:
size_t free_start = 0;
size_t free_count = 0;
while (index < end) {
ASSERT(page_reference_counts[index] > 0);
const RefCount ref_count = (--page_reference_counts[index]);
ASSERT(m_page_reference_counts[index] > 0);
const RefCount ref_count = (--m_page_reference_counts[index]);
// Keep track of how many zero refcounts we see in a row, to minimize calls to free.
if (ref_count == 0) {
@@ -208,7 +305,7 @@ private:
}
} else {
if (free_count > 0) {
this->Free(heap.GetAddress() + free_start * PageSize, free_count);
this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
free_count = 0;
}
}
@@ -217,44 +314,36 @@ private:
}
if (free_count > 0) {
this->Free(heap.GetAddress() + free_start * PageSize, free_count);
this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
}
}
static size_t CalculateManagementOverheadSize(size_t region_size);
static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
Common::BitSize<u64>()) *
sizeof(u64);
}
private:
using RefCount = u16;
KPageHeap heap;
std::vector<RefCount> page_reference_counts;
VAddr management_region{};
Pool pool{};
Impl* next{};
Impl* prev{};
KPageHeap m_heap;
std::vector<RefCount> m_page_reference_counts;
VAddr m_management_region{};
Pool m_pool{};
Impl* m_next{};
Impl* m_prev{};
};
private:
Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
Impl& GetManager(PAddr address) {
return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
const Impl& GetManager(PAddr address) const {
return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
}
constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
: pool_managers_head[static_cast<size_t>(pool)];
constexpr Impl* GetFirstManager(Pool pool, Direction dir) {
return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)]
: m_pool_managers_head[static_cast<size_t>(pool)];
}
constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
constexpr Impl* GetNextManager(Impl* cur, Direction dir) {
if (dir == Direction::FromBack) {
return cur->GetPrev();
} else {
@@ -263,15 +352,21 @@ private:
}
Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
bool random);
bool unoptimized, bool random);
private:
Core::System& system;
std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
std::array<Impl*, MaxManagerCount> pool_managers_head{};
std::array<Impl*, MaxManagerCount> pool_managers_tail{};
std::array<Impl, MaxManagerCount> managers;
size_t num_managers{};
template <typename T>
using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>;
Core::System& m_system;
const KMemoryLayout& m_memory_layout;
PoolArray<KLightLock> m_pool_locks;
std::array<Impl*, MaxManagerCount> m_pool_managers_head{};
std::array<Impl*, MaxManagerCount> m_pool_managers_tail{};
std::array<Impl, MaxManagerCount> m_managers;
size_t m_num_managers{};
PoolArray<u64> m_optimized_process_ids{};
PoolArray<bool> m_has_optimized_process{};
};
} // namespace Kernel


@@ -142,32 +142,38 @@ private:
} // namespace impl
constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
constexpr inline auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
constexpr inline auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
constexpr inline auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1);
static_assert(KMemoryRegionType_Dram.GetValue() == 0x2);
constexpr auto KMemoryRegionType_DramKernelBase =
// constexpr inline auto KMemoryRegionType_CoreLocalRegion =
// KMemoryRegionType_None.DeriveInitial(2).Finalize();
// static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4);
constexpr inline auto KMemoryRegionType_DramKernelBase =
KMemoryRegionType_Dram.DeriveSparse(0, 3, 0)
.SetAttribute(KMemoryRegionAttr_NoUserMap)
.SetAttribute(KMemoryRegionAttr_CarveoutProtected);
constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
constexpr auto KMemoryRegionType_DramHeapBase =
constexpr inline auto KMemoryRegionType_DramReservedBase =
KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
constexpr inline auto KMemoryRegionType_DramHeapBase =
KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelBase.GetValue() ==
(0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16));
static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped));
constexpr auto KMemoryRegionType_DramKernelCode =
constexpr inline auto KMemoryRegionType_DramKernelCode =
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0);
constexpr auto KMemoryRegionType_DramKernelSlab =
constexpr inline auto KMemoryRegionType_DramKernelSlab =
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1);
constexpr auto KMemoryRegionType_DramKernelPtHeap =
constexpr inline auto KMemoryRegionType_DramKernelPtHeap =
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute(
KMemoryRegionAttr_LinearMapped);
constexpr auto KMemoryRegionType_DramKernelInitPt =
constexpr inline auto KMemoryRegionType_DramKernelInitPt =
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute(
KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelCode.GetValue() ==
@@ -181,32 +187,40 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
(0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));
constexpr auto KMemoryRegionType_DramReservedEarly =
constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
(0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));
constexpr inline auto KMemoryRegionType_DramReservedEarly =
KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
static_assert(KMemoryRegionType_DramReservedEarly.GetValue() ==
(0x16 | KMemoryRegionAttr_NoUserMap));
constexpr auto KMemoryRegionType_KernelTraceBuffer =
constexpr inline auto KMemoryRegionType_KernelTraceBuffer =
KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0)
.SetAttribute(KMemoryRegionAttr_LinearMapped)
.SetAttribute(KMemoryRegionAttr_UserReadOnly);
constexpr auto KMemoryRegionType_OnMemoryBootImage =
constexpr inline auto KMemoryRegionType_OnMemoryBootImage =
KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1);
constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
constexpr inline auto KMemoryRegionType_DTB =
KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() ==
(0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly));
static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156);
static_assert(KMemoryRegionType_DTB.GetValue() == 0x256);
constexpr auto KMemoryRegionType_DramPoolPartition =
constexpr inline auto KMemoryRegionType_DramPoolPartition =
KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
(0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr auto KMemoryRegionType_DramPoolManagement =
constexpr inline auto KMemoryRegionType_DramPoolManagement =
KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
KMemoryRegionAttr_CarveoutProtected);
constexpr auto KMemoryRegionType_DramUserPool =
constexpr inline auto KMemoryRegionType_DramUserPool =
KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
(0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
@@ -214,11 +228,13 @@ static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
(0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0);
constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1);
constexpr auto KMemoryRegionType_DramSystemNonSecurePool =
constexpr inline auto KMemoryRegionType_DramApplicationPool =
KMemoryRegionType_DramUserPool.Derive(4, 0);
constexpr inline auto KMemoryRegionType_DramAppletPool =
KMemoryRegionType_DramUserPool.Derive(4, 1);
constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
KMemoryRegionType_DramUserPool.Derive(4, 2);
constexpr auto KMemoryRegionType_DramSystemPool =
constexpr inline auto KMemoryRegionType_DramSystemPool =
KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
(0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
@@ -230,50 +246,55 @@ static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
(0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_CarveoutProtected));
constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap =
constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
// UNUSED: .DeriveSparse(2, 2, 0);
constexpr auto KMemoryRegionType_VirtualDramUnknownDebug =
constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
constexpr auto KMemoryRegionType_VirtualDramKernelInitPt =
constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
constexpr auto KMemoryRegionType_VirtualDramPoolManagement =
constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
constexpr auto KMemoryRegionType_VirtualDramUserPool =
constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
// NOTE: For unknown reason, the pools are derived out-of-order here. It's worth eventually trying
// to understand why Nintendo made this choice.
// NOTE: For unknown reason, the pools are derived out-of-order here.
// It's worth eventually trying to understand why Nintendo made this choice.
// UNUSED: .Derive(6, 0);
// UNUSED: .Derive(6, 1);
constexpr auto KMemoryRegionType_VirtualDramAppletPool =
constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
constexpr auto KMemoryRegionType_VirtualDramApplicationPool =
constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
constexpr auto KMemoryRegionType_VirtualDramSystemPool =
constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
constexpr auto KMemoryRegionType_ArchDeviceBase =
constexpr inline auto KMemoryRegionType_ArchDeviceBase =
KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
constexpr auto KMemoryRegionType_BoardDeviceBase =
constexpr inline auto KMemoryRegionType_BoardDeviceBase =
KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();
static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5);
static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
@@ -284,7 +305,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
#error "Unimplemented"
#else
// Default to no architecture devices.
constexpr auto NumArchitectureDeviceRegions = 0;
constexpr inline auto NumArchitectureDeviceRegions = 0;
#endif
static_assert(NumArchitectureDeviceRegions >= 0);
@@ -292,34 +313,35 @@ static_assert(NumArchitectureDeviceRegions >= 0);
#include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc"
#else
// Default to no board devices.
constexpr auto NumBoardDeviceRegions = 0;
constexpr inline auto NumBoardDeviceRegions = 0;
#endif
static_assert(NumBoardDeviceRegions >= 0);
constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
constexpr inline auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
constexpr inline auto KMemoryRegionType_KernelStack =
KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
constexpr inline auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
constexpr inline auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19);
static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29);
static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49);
static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89);
constexpr auto KMemoryRegionType_KernelMiscDerivedBase =
constexpr inline auto KMemoryRegionType_KernelMiscDerivedBase =
KMemoryRegionType_KernelMisc.DeriveTransition();
static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149);
// UNUSED: .Derive(7, 0);
constexpr auto KMemoryRegionType_KernelMiscMainStack =
constexpr inline auto KMemoryRegionType_KernelMiscMainStack =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1);
constexpr auto KMemoryRegionType_KernelMiscMappedDevice =
constexpr inline auto KMemoryRegionType_KernelMiscMappedDevice =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2);
constexpr auto KMemoryRegionType_KernelMiscExceptionStack =
constexpr inline auto KMemoryRegionType_KernelMiscExceptionStack =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3);
constexpr auto KMemoryRegionType_KernelMiscUnknownDebug =
constexpr inline auto KMemoryRegionType_KernelMiscUnknownDebug =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4);
// UNUSED: .Derive(7, 5);
constexpr auto KMemoryRegionType_KernelMiscIdleStack =
constexpr inline auto KMemoryRegionType_KernelMiscIdleStack =
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);
static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49);
static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49);
@@ -327,7 +349,8 @@ static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349);
static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549);
static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349);
constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
constexpr inline auto KMemoryRegionType_KernelTemp =
KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
@@ -335,6 +358,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
} else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
} else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
return KMemoryRegionType_VirtualDramUnknownDebug;
} else {


@@ -16,107 +16,126 @@
namespace Kernel {
class KPageBitmap {
private:
public:
class RandomBitGenerator {
private:
Common::TinyMT rng{};
u32 entropy{};
u32 bits_available{};
private:
void RefreshEntropy() {
entropy = rng.GenerateRandomU32();
bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>());
}
bool GenerateRandomBit() {
if (bits_available == 0) {
this->RefreshEntropy();
}
const bool rnd_bit = (entropy & 1) != 0;
entropy >>= 1;
--bits_available;
return rnd_bit;
}
public:
RandomBitGenerator() {
rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
}
std::size_t SelectRandomBit(u64 bitmap) {
u64 SelectRandomBit(u64 bitmap) {
u64 selected = 0;
u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2;
u64 cur_mask = (1ULL << cur_num_bits) - 1;
for (size_t cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; cur_num_bits != 0;
cur_num_bits /= 2) {
const u64 high = (bitmap >> cur_num_bits);
const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits)));
while (cur_num_bits) {
const u64 low = (bitmap >> 0) & cur_mask;
const u64 high = (bitmap >> cur_num_bits) & cur_mask;
bool choose_low;
if (high == 0) {
// If only low val is set, choose low.
choose_low = true;
} else if (low == 0) {
// If only high val is set, choose high.
choose_low = false;
} else {
// If both are set, choose random.
choose_low = this->GenerateRandomBit();
}
// If we chose low, proceed with low.
if (choose_low) {
bitmap = low;
selected += 0;
} else {
// Choose high if we have high and (don't have low or select high randomly).
if (high && (low == 0 || this->GenerateRandomBit())) {
bitmap = high;
selected += cur_num_bits;
} else {
bitmap = low;
selected += 0;
}
// Proceed.
cur_num_bits /= 2;
cur_mask >>= cur_num_bits;
}
return selected;
}
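The rewritten loop above halves the candidate window each iteration, descending into the high half only when it contains a set bit and either the low half is empty or a coin flip says so, reaching a single set bit in six steps for a 64-bit word. A standalone version of the same search, with std::mt19937 standing in for the kernel's TinyMT-backed bit generator:

#include <cstdint>
#include <random>

// Precondition: bitmap != 0 (the caller checks this before descending).
std::uint64_t SelectRandomBit(std::uint64_t bitmap, std::mt19937& rng) {
    std::uint64_t selected = 0;
    std::uint64_t cur_num_bits = 32; // BitSize<u64>() / 2
    std::uint64_t cur_mask = (std::uint64_t{1} << cur_num_bits) - 1;
    while (cur_num_bits) {
        const std::uint64_t low = bitmap & cur_mask;
        const std::uint64_t high = (bitmap >> cur_num_bits) & cur_mask;
        // Take the high half if it has a set bit and (low is empty or random).
        if (high && (low == 0 || (rng() & 1))) {
            bitmap = high;
            selected += cur_num_bits;
        } else {
            bitmap = low;
        }
        cur_num_bits /= 2;
        cur_mask >>= cur_num_bits;
    }
    return selected; // bit index of one set bit of the original bitmap
}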
u64 GenerateRandom(u64 max) {
// Determine the number of bits we need.
const u64 bits_needed = 1 + (Common::BitSize<decltype(max)>() - std::countl_zero(max));
// Generate a random value of the desired bitwidth.
const u64 rnd = this->GenerateRandomBits(static_cast<u32>(bits_needed));
// Adjust the value to be in range.
return rnd - ((rnd / max) * max);
}
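The range adjustment above is ordinary remainder arithmetic: rnd - (rnd / max) * max is exactly rnd % max, mapping the freshly generated bits into [0, max). Worth noting that because the bit width is merely rounded up, the result is only approximately uniform when max is not a power of two. A one-line check of the identity:

static_assert(37u - (37u / 5u) * 5u == 37u % 5u); // both are 2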
private:
void RefreshEntropy() {
m_entropy = m_rng.GenerateRandomU32();
m_bits_available = static_cast<u32>(Common::BitSize<decltype(m_entropy)>());
}
bool GenerateRandomBit() {
if (m_bits_available == 0) {
this->RefreshEntropy();
}
const bool rnd_bit = (m_entropy & 1) != 0;
m_entropy >>= 1;
--m_bits_available;
return rnd_bit;
}
u64 GenerateRandomBits(u32 num_bits) {
u64 result = 0;
// Iteratively add random bits to our result.
while (num_bits > 0) {
// Ensure we have random bits to take from.
if (m_bits_available == 0) {
this->RefreshEntropy();
}
// Determine how many bits to take this round.
const auto cur_bits = std::min(num_bits, m_bits_available);
// Generate mask for our current bits.
const u64 mask = (static_cast<u64>(1) << cur_bits) - 1;
// Add bits to output from our entropy.
result <<= cur_bits;
result |= (m_entropy & mask);
// Remove bits from our entropy.
m_entropy >>= cur_bits;
m_bits_available -= cur_bits;
// Advance.
num_bits -= cur_bits;
}
return result;
}
private:
Common::TinyMT m_rng;
u32 m_entropy{};
u32 m_bits_available{};
};
public:
static constexpr std::size_t MaxDepth = 4;
private:
std::array<u64*, MaxDepth> bit_storages{};
RandomBitGenerator rng{};
std::size_t num_bits{};
std::size_t used_depths{};
static constexpr size_t MaxDepth = 4;
public:
KPageBitmap() = default;
constexpr std::size_t GetNumBits() const {
return num_bits;
constexpr size_t GetNumBits() const {
return m_num_bits;
}
constexpr s32 GetHighestDepthIndex() const {
return static_cast<s32>(used_depths) - 1;
return static_cast<s32>(m_used_depths) - 1;
}
u64* Initialize(u64* storage, std::size_t size) {
u64* Initialize(u64* storage, size_t size) {
// Initially, everything is un-set.
num_bits = 0;
m_num_bits = 0;
// Calculate the needed bitmap depth.
used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
ASSERT(used_depths <= MaxDepth);
m_used_depths = static_cast<size_t>(GetRequiredDepth(size));
ASSERT(m_used_depths <= MaxDepth);
// Set the bitmap pointers.
for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
bit_storages[depth] = storage;
m_bit_storages[depth] = storage;
size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>();
storage += size;
m_end_storages[depth] = storage;
}
return storage;
@@ -128,19 +147,19 @@ public:
if (random) {
do {
const u64 v = bit_storages[depth][offset];
const u64 v = m_bit_storages[depth][offset];
if (v == 0) {
// If depth is bigger than zero, then a previous level indicated a block was
// free.
ASSERT(depth == 0);
return -1;
}
offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v);
offset = offset * Common::BitSize<u64>() + m_rng.SelectRandomBit(v);
++depth;
} while (depth < static_cast<s32>(used_depths));
} while (depth < static_cast<s32>(m_used_depths));
} else {
do {
const u64 v = bit_storages[depth][offset];
const u64 v = m_bit_storages[depth][offset];
if (v == 0) {
// If depth is bigger than zero, then a previous level indicated a block was
// free.
@@ -149,28 +168,69 @@ public:
}
offset = offset * Common::BitSize<u64>() + std::countr_zero(v);
++depth;
} while (depth < static_cast<s32>(used_depths));
} while (depth < static_cast<s32>(m_used_depths));
}
return static_cast<s64>(offset);
}
void SetBit(std::size_t offset) {
s64 FindFreeRange(size_t count) {
// Check that it is possible to find a range.
const u64* const storage_start = m_bit_storages[m_used_depths - 1];
const u64* const storage_end = m_end_storages[m_used_depths - 1];
// If we don't have a storage to iterate (or want more blocks than fit in a single storage),
// we can't find a free range.
if (!(storage_start < storage_end && count <= Common::BitSize<u64>())) {
return -1;
}
// Walk the storages to select a random free range.
const size_t options_per_storage = std::max<size_t>(Common::BitSize<u64>() / count, 1);
const size_t num_entries = std::max<size_t>(storage_end - storage_start, 1);
const u64 free_mask = (static_cast<u64>(1) << count) - 1;
size_t num_valid_options = 0;
s64 chosen_offset = -1;
for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) {
u64 storage = storage_start[storage_index];
for (size_t option = 0; option < options_per_storage; ++option) {
if ((storage & free_mask) == free_mask) {
// We've found a new valid option.
++num_valid_options;
// Select the Kth valid option with probability 1/K. This leads to an overall
// uniform distribution.
if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) {
// This is our first option, so select it.
chosen_offset = storage_index * Common::BitSize<u64>() + option * count;
}
}
storage >>= count;
}
}
// Return the random offset we chose.
return chosen_offset;
}
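The "select the Kth valid option with probability 1/K" rule above is reservoir sampling with a reservoir of one: a single pass that ends up choosing uniformly among all candidates without counting them first. A standalone sketch of that rule in isolation:

#include <cstdint>
#include <random>
#include <vector>

int PickUniform(const std::vector<int>& candidates, std::mt19937_64& rng) {
    int chosen = -1;
    std::uint64_t seen = 0;
    for (const int candidate : candidates) {
        ++seen;
        std::uniform_int_distribution<std::uint64_t> dist(0, seen - 1);
        if (seen == 1 || dist(rng) == 0) {
            chosen = candidate; // kept with probability 1/seen
        }
    }
    return chosen;
}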

void SetBit(size_t offset) {
this->SetBit(this->GetHighestDepthIndex(), offset);
num_bits++;
m_num_bits++;
}
void ClearBit(std::size_t offset) {
void ClearBit(size_t offset) {
this->ClearBit(this->GetHighestDepthIndex(), offset);
num_bits--;
m_num_bits--;
}
bool ClearRange(std::size_t offset, std::size_t count) {
bool ClearRange(size_t offset, size_t count) {
s32 depth = this->GetHighestDepthIndex();
u64* bits = bit_storages[depth];
std::size_t bit_ind = offset / Common::BitSize<u64>();
if (count < Common::BitSize<u64>()) {
const std::size_t shift = offset % Common::BitSize<u64>();
u64* bits = m_bit_storages[depth];
size_t bit_ind = offset / Common::BitSize<u64>();
if (count < Common::BitSize<u64>()) [[likely]] {
const size_t shift = offset % Common::BitSize<u64>();
ASSERT(shift + count <= Common::BitSize<u64>());
// Check that all the bits are set.
const u64 mask = ((u64(1) << count) - 1) << shift;
@@ -189,8 +249,8 @@ public:
ASSERT(offset % Common::BitSize<u64>() == 0);
ASSERT(count % Common::BitSize<u64>() == 0);
// Check that all the bits are set.
std::size_t remaining = count;
std::size_t i = 0;
size_t remaining = count;
size_t i = 0;
do {
if (bits[bit_ind + i++] != ~u64(0)) {
return false;
@@ -209,18 +269,18 @@ public:
} while (remaining > 0);
}
num_bits -= count;
m_num_bits -= count;
return true;
}
private:
void SetBit(s32 depth, std::size_t offset) {
void SetBit(s32 depth, size_t offset) {
while (depth >= 0) {
std::size_t ind = offset / Common::BitSize<u64>();
std::size_t which = offset % Common::BitSize<u64>();
size_t ind = offset / Common::BitSize<u64>();
size_t which = offset % Common::BitSize<u64>();
const u64 mask = u64(1) << which;
u64* bit = std::addressof(bit_storages[depth][ind]);
u64* bit = std::addressof(m_bit_storages[depth][ind]);
u64 v = *bit;
ASSERT((v & mask) == 0);
*bit = v | mask;
@@ -232,13 +292,13 @@ private:
}
}
void ClearBit(s32 depth, std::size_t offset) {
void ClearBit(s32 depth, size_t offset) {
while (depth >= 0) {
std::size_t ind = offset / Common::BitSize<u64>();
std::size_t which = offset % Common::BitSize<u64>();
size_t ind = offset / Common::BitSize<u64>();
size_t which = offset % Common::BitSize<u64>();
const u64 mask = u64(1) << which;
u64* bit = std::addressof(bit_storages[depth][ind]);
u64* bit = std::addressof(m_bit_storages[depth][ind]);
u64 v = *bit;
ASSERT((v & mask) != 0);
v &= ~mask;
@@ -252,7 +312,7 @@ private:
}
private:
static constexpr s32 GetRequiredDepth(std::size_t region_size) {
static constexpr s32 GetRequiredDepth(size_t region_size) {
s32 depth = 0;
while (true) {
region_size /= Common::BitSize<u64>();
@@ -264,8 +324,8 @@ private:
}
public:
static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
std::size_t overhead_bits = 0;
static constexpr size_t CalculateManagementOverheadSize(size_t region_size) {
size_t overhead_bits = 0;
for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
region_size =
Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>();
@@ -273,6 +333,13 @@ public:
}
return overhead_bits * sizeof(u64);
}
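As a worked example (assuming the loop body elided by the hunk accumulates overhead_bits += region_size, as in the upstream implementation): for a region of 4096 bits, GetRequiredDepth returns 3 (4096 -> 64 -> 1 -> 0), so the loop runs for depths 2, 1, 0 and accumulates 64 + 1 + 1 = 66 words, i.e. 66 * sizeof(u64) = 528 bytes of management storage.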
private:
std::array<u64*, MaxDepth> m_bit_storages{};
std::array<u64*, MaxDepth> m_end_storages{};
RandomBitGenerator m_rng;
size_t m_num_bits{};
size_t m_used_depths{};
};
} // namespace Kernel

View File

@@ -11,6 +11,16 @@
namespace Kernel {
class KernelCore;
class KPageBufferSlabHeap : protected impl::KSlabHeapImpl {
public:
static constexpr size_t BufferSize = PageSize;
public:
void Initialize(Core::System& system);
};
class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
public:
explicit KPageBuffer(KernelCore&) {}
@@ -21,8 +31,6 @@ public:
private:
[[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};
};
- static_assert(sizeof(KPageBuffer) == PageSize);
+ static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);
static_assert(alignof(KPageBuffer) == PageSize);
} // namespace Kernel

View File

@@ -5,6 +5,7 @@
#include <list>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory_types.h"
@@ -12,6 +13,89 @@
namespace Kernel {
class KPageGroup;
class KBlockInfo {
private:
friend class KPageGroup;
public:
constexpr KBlockInfo() = default;
constexpr void Initialize(PAddr addr, size_t np) {
ASSERT(Common::IsAligned(addr, PageSize));
ASSERT(static_cast<u32>(np) == np);
m_page_index = static_cast<u32>(addr / PageSize);
m_num_pages = static_cast<u32>(np);
}
constexpr PAddr GetAddress() const {
return m_page_index * PageSize;
}
constexpr size_t GetNumPages() const {
return m_num_pages;
}
constexpr size_t GetSize() const {
return this->GetNumPages() * PageSize;
}
constexpr PAddr GetEndAddress() const {
return (m_page_index + m_num_pages) * PageSize;
}
constexpr PAddr GetLastAddress() const {
return this->GetEndAddress() - 1;
}
constexpr KBlockInfo* GetNext() const {
return m_next;
}
constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const {
return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages;
}
constexpr bool operator==(const KBlockInfo& rhs) const {
return this->IsEquivalentTo(rhs);
}
constexpr bool operator!=(const KBlockInfo& rhs) const {
return !(*this == rhs);
}
constexpr bool IsStrictlyBefore(PAddr addr) const {
const PAddr end = this->GetEndAddress();
if (m_page_index != 0 && end == 0) {
return false;
}
return end < addr;
}
constexpr bool operator<(PAddr addr) const {
return this->IsStrictlyBefore(addr);
}
constexpr bool TryConcatenate(PAddr addr, size_t np) {
if (addr != 0 && addr == this->GetEndAddress()) {
m_num_pages += static_cast<u32>(np);
return true;
}
return false;
}
private:
constexpr void SetNext(KBlockInfo* next) {
m_next = next;
}
private:
KBlockInfo* m_next{};
u32 m_page_index{};
u32 m_num_pages{};
};
static_assert(sizeof(KBlockInfo) <= 0x10);
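The packing above keeps the block descriptor itself in 8 bytes: a 32-bit page index plus a 32-bit page count describe any page-aligned run, with the intrusive next pointer bringing the whole node to at most 0x10 bytes. A hedged, self-contained sketch of the concatenation behaviour (the type and the 0x1000 page size are illustrative, not the kernel's):

#include <cassert>
#include <cstdint>

constexpr std::uint64_t kPageSize = 0x1000;  // illustrative page size

// Standalone analogue of KBlockInfo's (page_index, num_pages) packing.
struct BlockInfo {
    std::uint32_t page_index;
    std::uint32_t num_pages;

    std::uint64_t End() const {
        return (std::uint64_t{page_index} + num_pages) * kPageSize;
    }
    bool TryConcatenate(std::uint64_t addr, std::uint32_t np) {
        // Only a run starting exactly at our end address can merge in place.
        if (addr != 0 && addr == End()) {
            num_pages += np;
            return true;
        }
        return false;
    }
};

int main() {
    BlockInfo b{0x10, 4};                  // pages [0x10000, 0x14000)
    assert(b.TryConcatenate(0x14000, 2));  // adjacent run is absorbed
    assert(b.num_pages == 6 && b.End() == 0x16000);
}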
class KPageGroup final {
public:
class Node final {
@@ -92,6 +176,8 @@ public:
return nodes.empty();
}
void Finalize() {}
private:
std::list<Node> nodes;
};

View File

@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
return num_free;
}
- PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
+ PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
const size_t needed_size = m_blocks[index].GetSize();
for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
- if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
+ if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
@@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
return 0;
}
PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
// Get the size and required alignment.
const size_t needed_size = num_pages * PageSize;
const size_t align_size = align_pages * PageSize;
// Determine meta-alignment of our desired alignment size.
const size_t align_shift = std::countr_zero(align_size);
// Decide on a block to allocate from.
constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
{
// By default, we'll want to look at all blocks larger than our current one.
s32 max_blocks = static_cast<s32>(m_num_blocks);
// Determine the maximum block we should try to allocate from.
size_t possible_alignments = 0;
for (s32 i = index; i < max_blocks; ++i) {
// Add the possible alignments from blocks at the current size.
possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
m_blocks[i].GetNumFreeBlocks();
// If there are enough possible alignments, we don't need to look at larger blocks.
if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
max_blocks = i + 1;
break;
}
}
// If we have any possible alignments which require a larger block, we need to pick one.
if (possible_alignments > 0 && index + 1 < max_blocks) {
// Select a random alignment from the possibilities.
const size_t rnd = m_rng.GenerateRandom(possible_alignments);
// Determine which block corresponds to the random alignment we chose.
possible_alignments = 0;
for (s32 i = index; i < max_blocks; ++i) {
// Add the possible alignments from blocks at the current size.
possible_alignments +=
(1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
m_blocks[i].GetNumFreeBlocks();
// If the current block gets us to our random choice, use the current block.
if (rnd < possible_alignments) {
index = i;
break;
}
}
}
}
// Pop a block from the index we selected.
if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
// Determine how much size we have left over.
if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
leftover_size > 0) {
// Determine how many valid alignments we can have.
const size_t possible_alignments = 1 + (leftover_size >> align_shift);
// Select a random valid alignment.
const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
// Free memory before the random offset.
if (random_offset != 0) {
this->Free(addr, random_offset / PageSize);
}
// Advance our block by the random offset.
addr += random_offset;
// Free memory after our allocated block.
if (random_offset != leftover_size) {
this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
}
}
// Return the block we allocated.
return addr;
}
return 0;
}
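To make the alignment counting concrete (numbers illustrative): with 0x1000-byte pages, a request for 4 pages aligned to 4 pages gives needed_size = align_size = 0x4000 and align_shift = 14. A free 32-page block (0x20000 bytes) then contributes 1 + ((0x20000 - 0x4000) >> 14) = 8 possible alignments, one per aligned start offset 0x0, 0x4000, ..., 0x1C000 at which the allocation still fits. The first pass above chooses uniformly among all such positions across blocks, and the leftover pages before and after the chosen offset are freed back to the heap.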
void KPageHeap::FreeBlock(PAddr block, s32 index) {
do {
block = m_blocks[index++].PushBlock(block);

View File

@@ -14,13 +14,9 @@
namespace Kernel {
- class KPageHeap final {
+ class KPageHeap {
public:
YUZU_NON_COPYABLE(KPageHeap);
YUZU_NON_MOVEABLE(KPageHeap);
KPageHeap() = default;
~KPageHeap() = default;
constexpr PAddr GetAddress() const {
return m_heap_address;
@@ -57,7 +53,20 @@ public:
m_initial_used_size = m_heap_size - free_size - reserved_size;
}
- PAddr AllocateBlock(s32 index, bool random);
+ PAddr AllocateBlock(s32 index, bool random) {
if (random) {
const size_t block_pages = m_blocks[index].GetNumPages();
return this->AllocateByRandom(index, block_pages, block_pages);
} else {
return this->AllocateByLinearSearch(index);
}
}
PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
// TODO: linear search support?
return this->AllocateByRandom(index, num_pages, align_pages);
}
void Free(PAddr addr, size_t num_pages);
static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -68,7 +77,7 @@ public:
static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
const size_t target_pages = std::max(num_pages, align_pages);
for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
- if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);
}
}
@@ -77,7 +86,7 @@ public:
static constexpr s32 GetBlockIndex(size_t num_pages) {
for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
- if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i;
}
}
@@ -85,7 +94,7 @@ public:
}
static constexpr size_t GetBlockSize(size_t index) {
- return size_t(1) << MemoryBlockPageShifts[index];
+ return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
}
static constexpr size_t GetBlockNumPages(size_t index) {
@@ -93,13 +102,9 @@ public:
}
private:
- class Block final {
+ class Block {
public:
YUZU_NON_COPYABLE(Block);
YUZU_NON_MOVEABLE(Block);
Block() = default;
~Block() = default;
constexpr size_t GetShift() const {
return m_block_shift;
@@ -201,6 +206,9 @@ private:
};
private:
PAddr AllocateByLinearSearch(s32 index);
PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
size_t num_block_shifts);
@@ -209,7 +217,8 @@ private:
size_t m_heap_size{};
size_t m_initial_used_size{};
size_t m_num_blocks{};
- std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
+ std::array<Block, NumMemoryBlockPageShifts> m_blocks;
KPageBitmap::RandomBitGenerator m_rng;
std::vector<u64> m_management_data;
};

File diff suppressed because it is too large

View File

@@ -16,6 +16,7 @@
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
namespace Core {
class System;
@@ -23,7 +24,10 @@ class System;
namespace Kernel {
class KBlockInfoManager;
class KMemoryBlockManager;
class KResourceLimit;
class KSystemResource;
class KPageTable final {
public:
@@ -36,9 +40,9 @@ public:
~KPageTable();
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- VAddr code_addr, size_t code_size,
- KMemoryBlockSlabManager* mem_block_slab_manager,
- KMemoryManager::Pool pool);
+ bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
+ VAddr code_addr, size_t code_size, KSystemResource* system_resource,
+ KResourceLimit* resource_limit);
void Finalize();
@@ -74,12 +78,20 @@ public:
KMemoryState state, KMemoryPermission perm,
PAddr map_addr = 0);
- Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
- bool is_aligned);
- Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
+ Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
+ KMemoryPermission perm, bool is_aligned, bool check_heap);
+ Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
Result UnlockForIpcUserBuffer(VAddr address, size_t size);
Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
KMemoryPermission test_perm, KMemoryState dst_state, bool send);
Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -97,13 +109,54 @@ public:
bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
protected:
struct PageLinkedList {
private:
struct Node {
Node* m_next;
std::array<u8, PageSize - sizeof(Node*)> m_buffer;
};
public:
constexpr PageLinkedList() = default;
void Push(Node* n) {
ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
n->m_next = m_root;
m_root = n;
}
void Push(Core::Memory::Memory& memory, VAddr addr) {
this->Push(memory.GetPointer<Node>(addr));
}
Node* Peek() const {
return m_root;
}
Node* Pop() {
Node* const r = m_root;
m_root = r->m_next;
r->m_next = nullptr;
return r;
}
private:
Node* m_root{};
};
static_assert(std::is_trivially_destructible<PageLinkedList>::value);
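PageLinkedList is an intrusive free list: the link pointer lives in the first bytes of each tracked page, so collecting spare table pages costs no side allocation. A self-contained sketch of the same layout (sizes illustrative):

#include <array>
#include <cstddef>
#include <cstdint>

constexpr std::size_t kPageSize = 0x1000;  // illustrative

// The first pointer-sized bytes of each free page store the link itself.
struct FreePage {
    FreePage* next;
    std::array<std::uint8_t, kPageSize - sizeof(FreePage*)> payload;
};
static_assert(sizeof(FreePage) == kPageSize);

struct FreeList {
    FreePage* root = nullptr;
    void Push(FreePage* p) {
        p->next = root;
        root = p;
    }
    FreePage* Pop() {
        FreePage* const r = root;
        root = r->next;
        r->next = nullptr;
        return r;
    }
};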
private:
enum class OperationType : u32 {
- Map,
- MapGroup,
- Unmap,
- ChangePermissions,
- ChangePermissionsAndRefresh,
+ Map = 0,
+ MapFirst = 1,
+ MapGroup = 2,
+ Unmap = 3,
+ ChangePermissions = 4,
+ ChangePermissionsAndRefresh = 5,
+ Separate = 6,
};
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -123,6 +176,7 @@ private:
OperationType operation);
Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
PAddr map_addr = 0);
void FinalizeUpdate(PageLinkedList* page_list);
VAddr GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
@@ -199,6 +253,18 @@ private:
return *out != 0;
}
Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
KMemoryPermission test_perm, KMemoryState dst_state,
KPageTable& src_page_table, bool send);
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
size_t size, KMemoryPermission prot_perm);
// HACK: These will be removed once we automatically manage page reference counts.
void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;
@@ -316,6 +382,31 @@ public:
addr + size - 1 <= m_address_space_end - 1;
}
public:
static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
return layout.GetLinearVirtualAddress(addr);
}
static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
return layout.GetLinearPhysicalAddress(addr);
}
static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
private:
constexpr bool IsKernel() const {
return m_is_kernel;
@@ -330,6 +421,24 @@ private:
(addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
}
private:
class KScopedPageTableUpdater {
private:
KPageTable* m_pt{};
PageLinkedList m_ll;
public:
explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
~KScopedPageTableUpdater() {
m_pt->FinalizeUpdate(this->GetPageList());
}
PageLinkedList* GetPageList() {
return &m_ll;
}
};
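A hedged sketch of how the updater is meant to be used inside KPageTable (the operation itself is hypothetical; only the RAII shape is taken from the class above):

Result KPageTable::SomeMappingOperation(VAddr addr, size_t size) {  // hypothetical
    KScopedPageTableUpdater updater(this);
    // ... perform the update, pushing any table pages that become free
    // onto updater.GetPageList() ...
    return ResultSuccess;  // the destructor runs FinalizeUpdate() on every path
}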
private:
VAddr m_address_space_start{};
VAddr m_address_space_end{};
@@ -347,20 +456,27 @@ private:
VAddr m_alias_code_region_start{};
VAddr m_alias_code_region_end{};
- size_t m_mapped_physical_memory_size{};
size_t m_max_heap_size{};
size_t m_max_physical_memory_size{};
+ size_t m_mapped_physical_memory_size{};
size_t m_mapped_unsafe_physical_memory{};
size_t m_mapped_insecure_memory{};
size_t m_mapped_ipc_server_memory{};
size_t m_address_space_width{};
KMemoryBlockManager m_memory_block_manager;
u32 m_allocate_option{};
bool m_is_kernel{};
bool m_enable_aslr{};
bool m_enable_device_address_space_merge{};
KMemoryBlockSlabManager* m_memory_block_slab_manager{};
KBlockInfoManager* m_block_info_manager{};
KResourceLimit* m_resource_limit{};
u32 m_heap_fill_value{};
u32 m_ipc_fill_value{};
u32 m_stack_fill_value{};
const KMemoryRegion* m_cached_physical_heap_region{};
KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};

View File

@@ -0,0 +1,55 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_page_table_slab_heap.h"
namespace Kernel {
class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> {
public:
using RefCount = KPageTableSlabHeap::RefCount;
static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize;
public:
KPageTableManager() = default;
void Initialize(KDynamicPageManager* page_allocator, KPageTableSlabHeap* pt_heap) {
m_pt_heap = pt_heap;
static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>);
BaseHeap::Initialize(page_allocator, pt_heap);
}
VAddr Allocate() {
return VAddr(BaseHeap::Allocate());
}
RefCount GetRefCount(VAddr addr) const {
return m_pt_heap->GetRefCount(addr);
}
void Open(VAddr addr, int count) {
return m_pt_heap->Open(addr, count);
}
bool Close(VAddr addr, int count) {
return m_pt_heap->Close(addr, count);
}
bool IsInPageTableHeap(VAddr addr) const {
return m_pt_heap->IsInRange(addr);
}
private:
using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>;
KPageTableSlabHeap* m_pt_heap{};
};
} // namespace Kernel

View File

@@ -0,0 +1,93 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
namespace impl {
class PageTablePage {
public:
// Do not initialize anything.
PageTablePage() = default;
private:
std::array<u8, PageSize> m_buffer{};
};
static_assert(sizeof(PageTablePage) == PageSize);
} // namespace impl
class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> {
public:
using RefCount = u16;
static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
static_assert(PageTableSize == PageSize);
public:
KPageTableSlabHeap() = default;
static constexpr size_t CalculateReferenceCountSize(size_t size) {
return (size / PageSize) * sizeof(RefCount);
}
void Initialize(KDynamicPageManager* page_allocator, size_t object_count, RefCount* rc) {
BaseHeap::Initialize(page_allocator, object_count);
this->Initialize(rc);
}
RefCount GetRefCount(VAddr addr) {
ASSERT(this->IsInRange(addr));
return *this->GetRefCountPointer(addr);
}
void Open(VAddr addr, int count) {
ASSERT(this->IsInRange(addr));
*this->GetRefCountPointer(addr) += static_cast<RefCount>(count);
ASSERT(this->GetRefCount(addr) > 0);
}
bool Close(VAddr addr, int count) {
ASSERT(this->IsInRange(addr));
ASSERT(this->GetRefCount(addr) >= count);
*this->GetRefCountPointer(addr) -= static_cast<RefCount>(count);
return this->GetRefCount(addr) == 0;
}
bool IsInPageTableHeap(VAddr addr) const {
return this->IsInRange(addr);
}
private:
void Initialize([[maybe_unused]] RefCount* rc) {
// TODO(bunnei): Use rc once we support kernel virtual memory allocations.
const auto count = this->GetSize() / PageSize;
m_ref_counts.resize(count);
for (size_t i = 0; i < count; i++) {
m_ref_counts[i] = 0;
}
}
RefCount* GetRefCountPointer(VAddr addr) {
return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize);
}
private:
using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>;
std::vector<RefCount> m_ref_counts;
};
} // namespace Kernel
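A hedged sketch of the reference-count protocol above (the function and addresses are hypothetical; Open/Close are the members shown in the header):

void TrackSharedTablePage(Kernel::KPageTableSlabHeap& heap, VAddr page) {
    heap.Open(page, 1);         // first user of the table page
    heap.Open(page, 2);         // two more entries share it
    if (heap.Close(page, 3)) {  // Close() returns true at refcount zero...
        // ...signalling that the page may be returned to the heap.
    }
}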

View File

@@ -57,12 +57,6 @@ Result KPort::EnqueueSession(KServerSession* session) {
server.EnqueueSession(session);
if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
session_ptr->ClientConnected(server.AcceptSession());
} else {
ASSERT(false);
}
return ResultSuccess;
}

View File

@@ -358,8 +358,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
}
// Initialize process address space
if (const Result result{page_table.InitializeForProcess(
- metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
- &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
+ metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
+ 0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)};
result.IsError()) {
R_RETURN(result);
}

View File

@@ -61,12 +61,6 @@ void KServerPort::Destroy() {
// Close our reference to our parent.
parent->Close();
- // Release host emulation members.
- session_handler.reset();
- // Ensure that the global list tracking server objects does not hold on to a reference.
- kernel.UnregisterServerObject(this);
}
bool KServerPort::IsSignaled() const {

View File

@@ -27,24 +27,6 @@ public:
void Initialize(KPort* parent_port_, std::string&& name_);
- /// Whether or not this server port has an HLE handler available.
- bool HasSessionRequestHandler() const {
- return !session_handler.expired();
- }
- /// Gets the HLE handler for this port.
- SessionRequestHandlerWeakPtr GetSessionRequestHandler() const {
- return session_handler;
- }
- /**
- * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port
- * will inherit a reference to this handler.
- */
- void SetSessionHandler(SessionRequestHandlerWeakPtr&& handler) {
- session_handler = std::move(handler);
- }
void EnqueueSession(KServerSession* pending_session);
KServerSession* AcceptSession();
@@ -65,7 +47,6 @@ private:
void CleanupSessions();
SessionList session_list;
- SessionRequestHandlerWeakPtr session_handler;
KPort* parent{};
};

View File

@@ -1,4 +1,4 @@
- // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+ // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <tuple>
@@ -33,12 +33,10 @@ KServerSession::KServerSession(KernelCore& kernel_)
KServerSession::~KServerSession() = default;
- void KServerSession::Initialize(KSession* parent_session_, std::string&& name_,
- std::shared_ptr<SessionRequestManager> manager_) {
+ void KServerSession::Initialize(KSession* parent_session_, std::string&& name_) {
// Set member variables.
parent = parent_session_;
name = std::move(name_);
- manager = manager_;
}
void KServerSession::Destroy() {
@@ -47,18 +45,99 @@ void KServerSession::Destroy() {
this->CleanupRequests();
parent->Close();
- // Release host emulation members.
- manager.reset();
- // Ensure that the global list tracking server objects does not hold on to a reference.
- kernel.UnregisterServerObject(this);
}
void KServerSession::OnClientClosed() {
- if (manager && manager->HasSessionHandler()) {
- manager->SessionHandler().ClientDisconnected(this);
- }
+ KScopedLightLock lk{m_lock};
// Handle any pending requests.
KSessionRequest* prev_request = nullptr;
while (true) {
// Declare variables for processing the request.
KSessionRequest* request = nullptr;
KEvent* event = nullptr;
KThread* thread = nullptr;
bool cur_request = false;
bool terminate = false;
// Get the next request.
{
KScopedSchedulerLock sl{kernel};
if (m_current_request != nullptr && m_current_request != prev_request) {
// Set the request, open a reference as we process it.
request = m_current_request;
request->Open();
cur_request = true;
// Get thread and event for the request.
thread = request->GetThread();
event = request->GetEvent();
// If the thread is terminating, handle that.
if (thread->IsTerminationRequested()) {
request->ClearThread();
request->ClearEvent();
terminate = true;
}
prev_request = request;
} else if (!m_request_list.empty()) {
// Pop the request from the front of the list.
request = std::addressof(m_request_list.front());
m_request_list.pop_front();
// Get thread and event for the request.
thread = request->GetThread();
event = request->GetEvent();
}
}
// If there are no requests, we're done.
if (request == nullptr) {
break;
}
// All requests must have threads.
ASSERT(thread != nullptr);
// Ensure that we close the request when done.
SCOPE_EXIT({ request->Close(); });
// If we're terminating, close a reference to the thread and event.
if (terminate) {
thread->Close();
if (event != nullptr) {
event->Close();
}
}
// If we need to, reply.
if (event != nullptr && !cur_request) {
// There must be no mappings.
ASSERT(request->GetSendCount() == 0);
ASSERT(request->GetReceiveCount() == 0);
ASSERT(request->GetExchangeCount() == 0);
// // Get the process and page table.
// KProcess *client_process = thread->GetOwnerProcess();
// auto &client_pt = client_process->GetPageTable();
// // Reply to the request.
// ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(),
// ResultSessionClosed);
// // Unlock the buffer.
// // NOTE: Nintendo does not check the result of this.
// client_pt.UnlockForIpcUserBuffer(request->GetAddress(), request->GetSize());
// Signal the event.
event->Signal();
}
}
// Notify.
this->NotifyAvailable(ResultSessionClosed);
}
bool KServerSession::IsSignaled() const {
@@ -73,24 +152,6 @@ bool KServerSession::IsSignaled() const {
return !m_request_list.empty() && m_current_request == nullptr;
}
- Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
- u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
- auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
- context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
- return manager->QueueSyncRequest(parent, std::move(context));
- }
- Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
- Result result = manager->CompleteSyncRequest(this, context);
- // The calling thread is waiting for this request to complete, so wake it up.
- context.GetThread().EndWait(result);
- return result;
- }
Result KServerSession::OnRequest(KSessionRequest* request) {
// Create the wait queue.
ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
@@ -105,24 +166,16 @@ Result KServerSession::OnRequest(KSessionRequest* request) {
// Check that we're not terminating.
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
- if (manager) {
- // HLE request.
- auto& memory{kernel.System().Memory()};
- this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory);
- } else {
- // Non-HLE request.
// Get whether we're empty.
const bool was_empty = m_request_list.empty();
// Add the request to the list.
request->Open();
m_request_list.push_back(*request);
// If we were empty, signal.
if (was_empty) {
this->NotifyAvailable();
}
- }
// If we have a request event, this is asynchronous, and we don't need to wait.
@@ -136,7 +189,7 @@ Result KServerSession::OnRequest(KSessionRequest* request) {
return GetCurrentThread(kernel).GetWaitResult();
}
- Result KServerSession::SendReply() {
+ Result KServerSession::SendReply(bool is_hle) {
// Lock the session.
KScopedLightLock lk{m_lock};
@@ -171,13 +224,18 @@ Result KServerSession::SendReply() {
Result result = ResultSuccess;
if (!closed) {
// If we're not closed, send the reply.
- Core::Memory::Memory& memory{kernel.System().Memory()};
- KThread* server_thread{GetCurrentThreadPointer(kernel)};
- UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
- auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
- auto* dst_msg_buffer = memory.GetPointer(client_message);
- std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
+ if (is_hle) {
+ // HLE servers write directly to a pointer to the thread command buffer. Therefore
+ // the reply has already been written in this case.
+ } else {
+ Core::Memory::Memory& memory{kernel.System().Memory()};
+ KThread* server_thread{GetCurrentThreadPointer(kernel)};
+ UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
+ auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
+ auto* dst_msg_buffer = memory.GetPointer(client_message);
+ std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
+ }
} else {
result = ResultSessionClosed;
}
@@ -223,7 +281,8 @@ Result KServerSession::SendReply() {
return result;
}
- Result KServerSession::ReceiveRequest() {
+ Result KServerSession::ReceiveRequest(std::shared_ptr<HLERequestContext>* out_context,
+ std::weak_ptr<SessionRequestManager> manager) {
// Lock the session.
KScopedLightLock lk{m_lock};
@@ -267,12 +326,22 @@ Result KServerSession::ReceiveRequest() {
// Receive the message.
Core::Memory::Memory& memory{kernel.System().Memory()};
- KThread* server_thread{GetCurrentThreadPointer(kernel)};
- UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
- auto* src_msg_buffer = memory.GetPointer(client_message);
- auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
- std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
+ if (out_context != nullptr) {
+ // HLE request.
+ u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
+ *out_context = std::make_shared<HLERequestContext>(kernel, memory, this, client_thread);
+ (*out_context)->SetSessionRequestManager(manager);
+ (*out_context)
+ ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(),
+ cmd_buf);
+ } else {
+ KThread* server_thread{GetCurrentThreadPointer(kernel)};
+ UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
+ auto* src_msg_buffer = memory.GetPointer(client_message);
+ auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
+ std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
+ }
// We succeeded.
return ResultSuccess;

View File

@@ -1,4 +1,4 @@
- // SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+ // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -16,21 +16,11 @@
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
namespace Core::Memory {
class Memory;
}
namespace Core::Timing {
class CoreTiming;
struct EventType;
} // namespace Core::Timing
namespace Kernel {
class HLERequestContext;
class KernelCore;
class KSession;
class SessionRequestHandler;
class SessionRequestManager;
class KThread;
@@ -46,8 +36,7 @@ public:
void Destroy() override;
- void Initialize(KSession* parent_session_, std::string&& name_,
- std::shared_ptr<SessionRequestManager> manager_);
+ void Initialize(KSession* parent_session_, std::string&& name_);
KSession* GetParent() {
return parent;
@@ -60,32 +49,20 @@ public:
bool IsSignaled() const override;
void OnClientClosed();
- /// Gets the session request manager, which forwards requests to the underlying service
- std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() {
- return manager;
- }
/// TODO: flesh these out to match the real kernel
Result OnRequest(KSessionRequest* request);
- Result SendReply();
- Result ReceiveRequest();
+ Result SendReply(bool is_hle = false);
+ Result ReceiveRequest(std::shared_ptr<HLERequestContext>* out_context = nullptr,
+ std::weak_ptr<SessionRequestManager> manager = {});
Result SendReplyHLE() {
return SendReply(true);
}
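A hedged sketch of how an HLE service loop is expected to drive this pair (the surrounding service-thread plumbing, and the names session and manager, are assumed context rather than shown API):

std::shared_ptr<HLERequestContext> context;
if (const Result result = session->ReceiveRequest(std::addressof(context), manager);
    result.IsError()) {
    return result;
}
// ... dispatch *context to the HLE handler, which writes its reply
// directly into the thread command buffer ...
return session->SendReplyHLE();  // kernel-side bookkeeping only, no memcpy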
private:
/// Frees up waiting client sessions when this server session is about to die
void CleanupRequests();
- /// Queues a sync request from the emulated application.
- Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
- /// Completes a sync request from the emulated application.
- Result CompleteSyncRequest(HLERequestContext& context);
- /// This session's HLE request handlers; if nullptr, this is not an HLE server
- std::shared_ptr<SessionRequestManager> manager;
/// When set to True, converts the session to a domain at the end of the command
bool convert_to_domain{};
/// KSession that owns this KServerSession
KSession* parent{};

View File

@@ -13,8 +13,7 @@ KSession::KSession(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, server{kernel_}, client{kernel_} {}
KSession::~KSession() = default;
- void KSession::Initialize(KClientPort* port_, const std::string& name_,
- std::shared_ptr<SessionRequestManager> manager_) {
+ void KSession::Initialize(KClientPort* port_, const std::string& name_) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both server and client are closed
@@ -26,7 +25,7 @@ void KSession::Initialize(KClientPort* port_, const std::string& name_,
KAutoObject::Create(std::addressof(client));
// Initialize our sub sessions.
- server.Initialize(this, name_ + ":Server", manager_);
+ server.Initialize(this, name_ + ":Server");
client.Initialize(this, name_ + ":Client");
// Set state and name.

View File

@@ -21,8 +21,7 @@ public:
explicit KSession(KernelCore& kernel_);
~KSession() override;
- void Initialize(KClientPort* port_, const std::string& name_,
- std::shared_ptr<SessionRequestManager> manager_ = nullptr);
+ void Initialize(KClientPort* port_, const std::string& name_);
void Finalize() override;

View File

@@ -0,0 +1,26 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_system_resource.h"
namespace Kernel {
Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size,
[[maybe_unused]] KResourceLimit* resource_limit,
[[maybe_unused]] KMemoryManager::Pool pool) {
// Unimplemented
UNREACHABLE();
}
void KSecureSystemResource::Finalize() {
// Unimplemented
UNREACHABLE();
}
size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(
[[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) {
// Unimplemented
UNREACHABLE();
}
} // namespace Kernel

View File

@@ -0,0 +1,137 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_table_manager.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
// NOTE: Nintendo's implementation does not have the "is_secure_resource" field, and instead uses
// virtual IsSecureResource().
class KSystemResource : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject);
public:
explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {}
protected:
void SetSecureResource() {
m_is_secure_resource = true;
}
public:
virtual void Destroy() override {
UNREACHABLE_MSG("KSystemResource::Destroy() was called");
}
bool IsSecureResource() const {
return m_is_secure_resource;
}
void SetManagers(KMemoryBlockSlabManager& mb, KBlockInfoManager& bi, KPageTableManager& pt) {
ASSERT(m_p_memory_block_slab_manager == nullptr);
ASSERT(m_p_block_info_manager == nullptr);
ASSERT(m_p_page_table_manager == nullptr);
m_p_memory_block_slab_manager = std::addressof(mb);
m_p_block_info_manager = std::addressof(bi);
m_p_page_table_manager = std::addressof(pt);
}
const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const {
return *m_p_memory_block_slab_manager;
}
const KBlockInfoManager& GetBlockInfoManager() const {
return *m_p_block_info_manager;
}
const KPageTableManager& GetPageTableManager() const {
return *m_p_page_table_manager;
}
KMemoryBlockSlabManager& GetMemoryBlockSlabManager() {
return *m_p_memory_block_slab_manager;
}
KBlockInfoManager& GetBlockInfoManager() {
return *m_p_block_info_manager;
}
KPageTableManager& GetPageTableManager() {
return *m_p_page_table_manager;
}
KMemoryBlockSlabManager* GetMemoryBlockSlabManagerPointer() {
return m_p_memory_block_slab_manager;
}
KBlockInfoManager* GetBlockInfoManagerPointer() {
return m_p_block_info_manager;
}
KPageTableManager* GetPageTableManagerPointer() {
return m_p_page_table_manager;
}
private:
KMemoryBlockSlabManager* m_p_memory_block_slab_manager{};
KBlockInfoManager* m_p_block_info_manager{};
KPageTableManager* m_p_page_table_manager{};
bool m_is_secure_resource{false};
};
class KSecureSystemResource final
: public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> {
public:
explicit KSecureSystemResource(KernelCore& kernel_)
: KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) {
// Mark ourselves as being a secure resource.
this->SetSecureResource();
}
Result Initialize(size_t size, KResourceLimit* resource_limit, KMemoryManager::Pool pool);
void Finalize();
bool IsInitialized() const {
return m_is_initialized;
}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
size_t CalculateRequiredSecureMemorySize() const {
return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool);
}
size_t GetSize() const {
return m_resource_size;
}
size_t GetUsedSize() const {
return m_dynamic_page_manager.GetUsed() * PageSize;
}
const KDynamicPageManager& GetDynamicPageManager() const {
return m_dynamic_page_manager;
}
public:
static size_t CalculateRequiredSecureMemorySize(size_t size, KMemoryManager::Pool pool);
private:
bool m_is_initialized{};
KMemoryManager::Pool m_resource_pool{};
KDynamicPageManager m_dynamic_page_manager;
KMemoryBlockSlabManager m_memory_block_slab_manager;
KBlockInfoManager m_block_info_manager;
KPageTableManager m_page_table_manager;
KMemoryBlockSlabHeap m_memory_block_heap;
KBlockInfoSlabHeap m_block_info_heap;
KPageTableSlabHeap m_page_table_heap;
KResourceLimit* m_resource_limit{};
VAddr m_resource_address{};
size_t m_resource_size{};
};
} // namespace Kernel

View File

@@ -1185,8 +1185,10 @@ void KThread::RequestDummyThreadWait() {
}
void KThread::DummyThreadBeginWait() {
- ASSERT(this->IsDummyThread());
- ASSERT(!kernel.IsPhantomModeForSingleCore());
+ if (!this->IsDummyThread() || kernel.IsPhantomModeForSingleCore()) {
+ // Occurs in single core mode.
+ return;
+ }
// Block until runnable is no longer false.
dummy_thread_runnable.wait(false);

View File

@@ -28,10 +28,12 @@
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
@@ -47,6 +49,11 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
namespace Kernel {
struct KernelCore::Impl {
static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000;
static constexpr size_t BlockInfoSlabHeapSize = 4000;
static constexpr size_t ReservedDynamicPageCount = 64;
explicit Impl(Core::System& system_, KernelCore& kernel_)
: time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"},
service_thread_barrier{2}, system{system_} {}
@@ -60,7 +67,6 @@ struct KernelCore::Impl {
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
global_handle_table->Initialize(KHandleTable::MaxTableSize);
- default_service_thread = CreateServiceThread(kernel, "DefaultServiceThread");
is_phantom_mode_for_singlecore = false;
@@ -72,7 +78,6 @@ struct KernelCore::Impl {
// Initialize kernel memory and resources.
InitializeSystemResourceLimit(kernel, system.CoreTiming());
InitializeMemoryLayout();
- Init::InitializeKPageBufferSlabHeap(system);
InitializeShutdownThreads();
InitializePhysicalCores();
InitializePreemption(kernel);
@@ -82,10 +87,13 @@ struct KernelCore::Impl {
const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
ASSERT(pt_heap_region.GetEndAddress() != 0);
- InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
+ InitializeResourceManagers(kernel, pt_heap_region.GetAddress(),
+ pt_heap_region.GetSize());
}
RegisterHostThread();
+ default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread");
}
void InitializeCores() {
@@ -184,17 +192,6 @@ struct KernelCore::Impl {
}
void CloseServices() {
- // Close all open server sessions and ports.
- std::unordered_set<KAutoObject*> server_objects_;
- {
- std::scoped_lock lk(server_objects_lock);
- server_objects_ = server_objects;
- server_objects.clear();
- }
- for (auto* server_object : server_objects_) {
- server_object->Close();
- }
// Ensures all service threads gracefully shutdown.
ClearServiceThreads();
}
@@ -263,16 +260,82 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
- void InitializeResourceManagers(VAddr address, size_t size) {
- dynamic_page_manager = std::make_unique<KDynamicPageManager>();
- memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
- app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
- dynamic_page_manager->Initialize(address, size);
- static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
- memory_block_heap->Initialize(dynamic_page_manager.get(),
- ApplicationMemoryBlockSlabHeapSize);
- app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
+ void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) {
+ // Ensure that the buffer is suitable for our use.
+ ASSERT(Common::IsAligned(address, PageSize));
+ ASSERT(Common::IsAligned(size, PageSize));
// Ensure that we have space for our reference counts.
const size_t rc_size =
Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize);
ASSERT(rc_size < size);
size -= rc_size;
// Initialize the resource managers' shared page manager.
resource_manager_page_manager = std::make_unique<KDynamicPageManager>();
resource_manager_page_manager->Initialize(
address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize));
// Initialize the KPageBuffer slab heap.
page_buffer_slab_heap.Initialize(system);
// Initialize the fixed-size slabheaps.
app_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
sys_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
block_info_heap = std::make_unique<KBlockInfoSlabHeap>();
app_memory_block_heap->Initialize(resource_manager_page_manager.get(),
ApplicationMemoryBlockSlabHeapSize);
sys_memory_block_heap->Initialize(resource_manager_page_manager.get(),
SystemMemoryBlockSlabHeapSize);
block_info_heap->Initialize(resource_manager_page_manager.get(), BlockInfoSlabHeapSize);
// Reserve all but a fixed number of remaining pages for the page table heap.
const size_t num_pt_pages = resource_manager_page_manager->GetCount() -
resource_manager_page_manager->GetUsed() -
ReservedDynamicPageCount;
page_table_heap = std::make_unique<KPageTableSlabHeap>();
// TODO(bunnei): Pass in address once we support kernel virtual memory allocations.
page_table_heap->Initialize(
resource_manager_page_manager.get(), num_pt_pages,
/*GetPointer<KPageTableManager::RefCount>(address + size)*/ nullptr);
// Setup the slab managers.
KDynamicPageManager* const app_dynamic_page_manager = nullptr;
KDynamicPageManager* const sys_dynamic_page_manager =
/*KTargetSystem::IsDynamicResourceLimitsEnabled()*/ true
? resource_manager_page_manager.get()
: nullptr;
app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
sys_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
app_block_info_manager = std::make_unique<KBlockInfoManager>();
sys_block_info_manager = std::make_unique<KBlockInfoManager>();
app_page_table_manager = std::make_unique<KPageTableManager>();
sys_page_table_manager = std::make_unique<KPageTableManager>();
app_memory_block_manager->Initialize(app_dynamic_page_manager, app_memory_block_heap.get());
sys_memory_block_manager->Initialize(sys_dynamic_page_manager, sys_memory_block_heap.get());
app_block_info_manager->Initialize(app_dynamic_page_manager, block_info_heap.get());
sys_block_info_manager->Initialize(sys_dynamic_page_manager, block_info_heap.get());
app_page_table_manager->Initialize(app_dynamic_page_manager, page_table_heap.get());
sys_page_table_manager->Initialize(sys_dynamic_page_manager, page_table_heap.get());
// Check that we have the correct number of dynamic pages available.
ASSERT(resource_manager_page_manager->GetCount() -
resource_manager_page_manager->GetUsed() ==
ReservedDynamicPageCount);
// Create the application and system resources.
app_system_resource = std::make_unique<KSystemResource>(kernel);
sys_system_resource = std::make_unique<KSystemResource>(kernel);
// Set the managers for the system resources.
app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
*app_page_table_manager);
sys_system_resource->SetManagers(*sys_memory_block_manager, *sys_block_info_manager,
*sys_page_table_manager);
}
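To make the accounting concrete (illustrative numbers): with 4 KiB pages, a 256 MiB page table heap region holds 65536 pages, so CalculateReferenceCountSize reserves 65536 * sizeof(u16) = 128 KiB at the end of the region before the page manager is initialized. After the three fixed-size slab heaps take their pages, every remaining page except the final ReservedDynamicPageCount (64) is handed to the page table heap, and the closing ASSERT checks exactly that invariant.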
void InitializeShutdownThreads() {
@@ -346,6 +409,8 @@ struct KernelCore::Impl {
return this_id;
}
static inline thread_local bool is_phantom_mode_for_singlecore{false};
bool IsPhantomModeForSingleCore() const {
return is_phantom_mode_for_singlecore;
}
@@ -454,6 +519,9 @@ struct KernelCore::Impl {
ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
// Determine if we'll use extra thread resources.
const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
// Setup the stack region.
constexpr size_t StackRegionSize = 14_MiB;
constexpr size_t StackRegionAlign = KernelAslrAlignment;
@@ -464,7 +532,8 @@ struct KernelCore::Impl {
stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
// Determine the size of the resource region.
- const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit();
+ const size_t resource_region_size =
+ memory_layout->GetResourceRegionSizeForInit(use_extra_resources);
// Determine the size of the slab region.
const size_t slab_region_size =
@@ -698,54 +767,48 @@ struct KernelCore::Impl {
return {};
}
- KClientPort* port = &search->second(system.ServiceManager(), system);
- RegisterServerObject(&port->GetParent()->GetServerPort());
- return port;
+ return &search->second(system.ServiceManager(), system);
}
- void RegisterServerObject(KAutoObject* server_object) {
- std::scoped_lock lk(server_objects_lock);
- server_objects.insert(server_object);
- }
- void UnregisterServerObject(KAutoObject* server_object) {
- std::scoped_lock lk(server_objects_lock);
- server_objects.erase(server_object);
- }
+ void RegisterNamedServiceHandler(std::string name, KServerPort* server_port) {
+ auto search = service_interface_handlers.find(name);
+ if (search == service_interface_handlers.end()) {
+ return;
+ }
+ search->second(system.ServiceManager(), server_port);
+ }
- std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel,
- const std::string& name) {
- auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, 1, name);
+ Kernel::ServiceThread& CreateServiceThread(KernelCore& kernel, const std::string& name) {
+ auto* ptr = new ServiceThread(kernel, name);
service_threads_manager.QueueWork(
- [this, service_thread]() { service_threads.emplace(service_thread); });
+ [this, ptr]() { service_threads.emplace(ptr, std::unique_ptr<ServiceThread>(ptr)); });
- return service_thread;
+ return *ptr;
}
- void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
- if (auto strong_ptr = service_thread.lock()) {
- if (strong_ptr == default_service_thread.lock()) {
- // Nothing to do here, the service is using default_service_thread, which will be
- // released on shutdown.
- return;
- }
- service_threads_manager.QueueWork(
- [this, strong_ptr{std::move(strong_ptr)}]() { service_threads.erase(strong_ptr); });
- }
- }
+ void ReleaseServiceThread(Kernel::ServiceThread& service_thread) {
+ auto* ptr = &service_thread;
+ if (ptr == default_service_thread) {
+ // Nothing to do here, the service is using default_service_thread, which will be
+ // released on shutdown.
+ return;
+ }
+ service_threads_manager.QueueWork([this, ptr]() { service_threads.erase(ptr); });
+ }
}
void ClearServiceThreads() {
service_threads_manager.QueueWork([this] {
service_threads.clear();
- default_service_thread.reset();
+ default_service_thread = nullptr;
service_thread_barrier.Sync();
});
service_thread_barrier.Sync();
}
- std::mutex server_objects_lock;
std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock;
@@ -763,6 +826,8 @@ struct KernelCore::Impl {
Init::KSlabResourceCounts slab_resource_counts{};
KResourceLimit* system_resource_limit{};
KPageBufferSlabHeap page_buffer_slab_heap;
std::shared_ptr<Core::Timing::EventType> preemption_event;
// This is the kernel's handle table or supervisor handle table which
@@ -774,8 +839,8 @@ struct KernelCore::Impl {
/// Map of named ports managed by the kernel, which can be retrieved using
/// the ConnectToPort SVC.
std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
std::unordered_map<std::string, ServiceInterfaceHandlerFn> service_interface_handlers;
NamedPortTable named_ports;
- std::unordered_set<KAutoObject*> server_objects;
std::unordered_set<KAutoObject*> registered_objects;
std::unordered_set<KAutoObject*> registered_in_use_objects;
@@ -788,10 +853,20 @@ struct KernelCore::Impl {
// Kernel memory management
std::unique_ptr<KMemoryManager> memory_manager;
- // Dynamic slab managers
- std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
- std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
// Resource managers
std::unique_ptr<KDynamicPageManager> resource_manager_page_manager;
std::unique_ptr<KPageTableSlabHeap> page_table_heap;
std::unique_ptr<KMemoryBlockSlabHeap> app_memory_block_heap;
std::unique_ptr<KMemoryBlockSlabHeap> sys_memory_block_heap;
std::unique_ptr<KBlockInfoSlabHeap> block_info_heap;
std::unique_ptr<KPageTableManager> app_page_table_manager;
std::unique_ptr<KPageTableManager> sys_page_table_manager;
std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
std::unique_ptr<KMemoryBlockSlabManager> sys_memory_block_manager;
std::unique_ptr<KBlockInfoManager> app_block_info_manager;
std::unique_ptr<KBlockInfoManager> sys_block_info_manager;
std::unique_ptr<KSystemResource> app_system_resource;
std::unique_ptr<KSystemResource> sys_system_resource;
// Shared memory for services
Kernel::KSharedMemory* hid_shared_mem{};
@@ -804,8 +879,8 @@ struct KernelCore::Impl {
std::unique_ptr<KMemoryLayout> memory_layout;
// Threads used for services
- std::unordered_set<std::shared_ptr<ServiceThread>> service_threads;
- std::weak_ptr<ServiceThread> default_service_thread;
+ std::unordered_map<ServiceThread*, std::unique_ptr<ServiceThread>> service_threads;
+ ServiceThread* default_service_thread{};
Common::ThreadWorker service_threads_manager;
Common::Barrier service_thread_barrier;
@@ -814,7 +889,6 @@ struct KernelCore::Impl {
bool is_multicore{};
std::atomic_bool is_shutting_down{};
- bool is_phantom_mode_for_singlecore{};
u32 single_core_thread_id{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
@@ -981,16 +1055,17 @@ void KernelCore::RegisterNamedService(std::string name, ServiceInterfaceFactory&
impl->service_interface_factory.emplace(std::move(name), factory);
}
void KernelCore::RegisterInterfaceForNamedService(std::string name,
ServiceInterfaceHandlerFn&& handler) {
impl->service_interface_handlers.emplace(std::move(name), handler);
}
KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
return impl->CreateNamedServicePort(std::move(name));
}
void KernelCore::RegisterServerObject(KAutoObject* server_object) {
impl->RegisterServerObject(server_object);
}
void KernelCore::UnregisterServerObject(KAutoObject* server_object) {
impl->UnregisterServerObject(server_object);
void KernelCore::RegisterNamedServiceHandler(std::string name, KServerPort* server_port) {
impl->RegisterNamedServiceHandler(std::move(name), server_port);
}
void KernelCore::RegisterKernelObject(KAutoObject* object) {
@@ -1069,12 +1144,12 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager;
}
- KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
- return *impl->app_memory_block_manager;
+ KSystemResource& KernelCore::GetSystemSystemResource() {
+ return *impl->sys_system_resource;
}
- const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
- return *impl->app_memory_block_manager;
+ const KSystemResource& KernelCore::GetSystemSystemResource() const {
+ return *impl->sys_system_resource;
}
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
@@ -1162,15 +1237,15 @@ void KernelCore::ExitSVCProfile() {
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
}
- std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
+ Kernel::ServiceThread& KernelCore::CreateServiceThread(const std::string& name) {
return impl->CreateServiceThread(*this, name);
}
- std::weak_ptr<Kernel::ServiceThread> KernelCore::GetDefaultServiceThread() const {
- return impl->default_service_thread;
+ Kernel::ServiceThread& KernelCore::GetDefaultServiceThread() const {
+ return *impl->default_service_thread;
}
- void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
+ void KernelCore::ReleaseServiceThread(Kernel::ServiceThread& service_thread) {
impl->ReleaseServiceThread(service_thread);
}

View File

@@ -34,22 +34,27 @@ class KClientPort;
class GlobalSchedulerContext;
class KAutoObjectWithListContainer;
class KClientSession;
class KDebug;
class KDynamicPageManager;
class KEvent;
class KEventInfo;
class KHandleTable;
class KLinkedListNode;
class KMemoryBlockSlabManager;
class KMemoryLayout;
class KMemoryManager;
class KPageBuffer;
class KPageBufferSlabHeap;
class KPort;
class KProcess;
class KResourceLimit;
class KScheduler;
class KServerPort;
class KServerSession;
class KSession;
class KSessionRequest;
class KSharedMemory;
class KSharedMemoryInfo;
class KSecureSystemResource;
class KThread;
class KThreadLocalPage;
class KTransferMemory;
@@ -63,6 +68,8 @@ class TimeManager;
using ServiceInterfaceFactory =
std::function<KClientPort&(Service::SM::ServiceManager&, Core::System&)>;
using ServiceInterfaceHandlerFn = std::function<void(Service::SM::ServiceManager&, KServerPort*)>;
namespace Init {
struct KSlabResourceCounts;
}
@@ -192,16 +199,14 @@ public:
/// Registers a named HLE service, passing a factory used to open a port to that service.
void RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory);
/// Registers a setup function for the named HLE service.
void RegisterInterfaceForNamedService(std::string name, ServiceInterfaceHandlerFn&& handler);
/// Opens a port to a service previously registered with RegisterNamedService.
KClientPort* CreateNamedServicePort(std::string name);
- /// Registers a server session or port with the gobal emulation state, to be freed on shutdown.
- /// This is necessary because we do not emulate processes for HLE sessions and ports.
- void RegisterServerObject(KAutoObject* server_object);
- /// Unregisters a server session or port previously registered with RegisterServerSession when
- /// it was destroyed during the current emulation session.
- void UnregisterServerObject(KAutoObject* server_object);
/// Accepts a session on a port created by CreateNamedServicePort.
void RegisterNamedServiceHandler(std::string name, KServerPort* server_port);
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.
@@ -243,11 +248,11 @@ public:
/// Gets the virtual memory manager for the kernel.
const KMemoryManager& MemoryManager() const;
- /// Gets the application memory block manager for the kernel.
- KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
+ /// Gets the system resource manager.
+ KSystemResource& GetSystemSystemResource();
- /// Gets the application memory block manager for the kernel.
- const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
+ /// Gets the system resource manager.
+ const KSystemResource& GetSystemSystemResource() const;
/// Gets the shared memory object for HID services.
Kernel::KSharedMemory& GetHidSharedMem();
@@ -304,24 +309,24 @@ public:
* See GetDefaultServiceThread.
* @param name String name for the ServerSession creating this thread, used for debug
* purposes.
- * @returns The a weak pointer newly created service thread.
+ * @returns A reference to the newly created service thread.
*/
- std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
+ Kernel::ServiceThread& CreateServiceThread(const std::string& name);
/**
* Gets the default host service thread, which executes HLE service requests. Unless service
* requests need to block on the host, the default service thread should be used in favor of
* creating a new service thread.
- * @returns The a weak pointer for the default service thread.
+ * @returns A reference to the default service thread.
*/
std::weak_ptr<Kernel::ServiceThread> GetDefaultServiceThread() const;
Kernel::ServiceThread& GetDefaultServiceThread() const;
/**
* Releases a HLE service thread, instructing KernelCore to free it. This should be called when
* the ServerSession associated with the thread is destroyed.
* @param service_thread Service thread to release.
*/
void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread);
void ReleaseServiceThread(Kernel::ServiceThread& service_thread);
/// Workaround for single-core mode when preempting threads while idle.
bool IsPhantomModeForSingleCore() const;
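The three signatures above drop std::weak_ptr in favor of plain references, on the assumption that KernelCore owns every service thread and outlives all callers. A minimal illustration of the two call patterns (toy type, not the yuzu API):

#include <memory>

struct ServiceThread {
    void DoWork() {}
};

// Before: ownership was shared, so every caller locked a weak handle
// and had to tolerate it having expired.
void UseWeak(std::weak_ptr<ServiceThread> thread) {
    if (auto locked = thread.lock()) {
        locked->DoWork();
    }
}

// After: the kernel owns the thread outright, so a reference is always valid.
void UseRef(ServiceThread& thread) {
    thread.DoWork();
}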
@@ -363,6 +368,12 @@ public:
return slab_heap_container->thread_local_page;
} else if constexpr (std::is_same_v<T, KSessionRequest>) {
return slab_heap_container->session_request;
} else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
return slab_heap_container->secure_system_resource;
} else if constexpr (std::is_same_v<T, KEventInfo>) {
return slab_heap_container->event_info;
} else if constexpr (std::is_same_v<T, KDebug>) {
return slab_heap_container->debug;
}
}
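The new KSecureSystemResource, KEventInfo, and KDebug branches extend the compile-time dispatch used for every slab type: each instantiation of SlabHeap<T>() compiles down to a single member access. A self-contained sketch of the same if constexpr pattern (toy heap types, not yuzu's):

#include <type_traits>

struct EventInfoHeap {};
struct DebugHeap {};

struct SlabHeapContainer {
    EventInfoHeap event_info;
    DebugHeap debug;
};

template <typename T>
auto& SlabHeap(SlabHeapContainer& c) {
    // Branches not matching T are discarded at compile time, so only
    // valid member accesses are ever instantiated.
    if constexpr (std::is_same_v<T, EventInfoHeap>) {
        return c.event_info;
    } else if constexpr (std::is_same_v<T, DebugHeap>) {
        return c.debug;
    } else {
        static_assert(!sizeof(T*), "type has no registered slab heap");
    }
}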
@@ -426,6 +437,9 @@ private:
KSlabHeap<KPageBuffer> page_buffer;
KSlabHeap<KThreadLocalPage> thread_local_page;
KSlabHeap<KSessionRequest> session_request;
KSlabHeap<KSecureSystemResource> secure_system_resource;
KSlabHeap<KEventInfo> event_info;
KSlabHeap<KDebug> debug;
};
std::unique_ptr<SlabHeapContainer> slab_heap_container;


@@ -1,15 +1,18 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <condition_variable>
#include <functional>
#include <map>
#include <mutex>
#include <thread>
#include <vector>
#include <queue>
#include "common/scope_exit.h"
#include "common/thread.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
@@ -19,101 +22,198 @@ namespace Kernel {
class ServiceThread::Impl final {
public:
explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
explicit Impl(KernelCore& kernel, const std::string& service_name);
~Impl();
void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
void WaitAndProcessImpl();
void SessionClosed(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager);
void LoopProcess();
void RegisterServerSession(KServerSession* session,
std::shared_ptr<SessionRequestManager> manager);
private:
std::vector<std::jthread> threads;
std::queue<std::function<void()>> requests;
std::mutex queue_mutex;
std::condition_variable_any condition;
const std::string service_name;
KernelCore& kernel;
std::jthread m_thread;
std::mutex m_session_mutex;
std::map<KServerSession*, std::shared_ptr<SessionRequestManager>> m_sessions;
KEvent* m_wakeup_event;
KProcess* m_process;
std::atomic<bool> m_shutdown_requested;
const std::string m_service_name;
};
ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
: service_name{name} {
for (std::size_t i = 0; i < num_threads; ++i) {
threads.emplace_back([this, &kernel](std::stop_token stop_token) {
Common::SetCurrentThreadName(std::string{service_name}.c_str());
void ServiceThread::Impl::WaitAndProcessImpl() {
// Create local list of waitable sessions.
std::vector<KSynchronizationObject*> objs;
std::vector<std::shared_ptr<SessionRequestManager>> managers;
// Wait for first request before trying to acquire a render context
{
std::unique_lock lock{queue_mutex};
condition.wait(lock, stop_token, [this] { return !requests.empty(); });
}
{
// Lock to get the set.
std::scoped_lock lk{m_session_mutex};
if (stop_token.stop_requested()) {
return;
}
// Reserve the needed quantity.
objs.reserve(m_sessions.size() + 1);
managers.reserve(m_sessions.size());
// Allocate a dummy guest thread for this host thread.
kernel.RegisterHostThread();
// Copy to our local list.
for (const auto& [session, manager] : m_sessions) {
objs.push_back(session);
managers.push_back(manager);
}
while (true) {
std::function<void()> task;
// Insert the wakeup event at the end.
objs.push_back(&m_wakeup_event->GetReadableEvent());
}
{
std::unique_lock lock{queue_mutex};
condition.wait(lock, stop_token, [this] { return !requests.empty(); });
// Wait on the list of sessions.
s32 index{-1};
Result rc = KSynchronizationObject::Wait(kernel, &index, objs.data(),
static_cast<s32>(objs.size()), -1);
ASSERT(!rc.IsFailure());
if (stop_token.stop_requested()) {
return;
}
// If this was the wakeup event, clear it and finish.
if (index >= static_cast<s64>(objs.size() - 1)) {
m_wakeup_event->Clear();
return;
}
if (requests.empty()) {
continue;
}
// This event is from a server session.
auto* server_session = static_cast<KServerSession*>(objs[index]);
auto& manager = managers[index];
task = std::move(requests.front());
requests.pop();
}
// Fetch the HLE request context.
std::shared_ptr<HLERequestContext> context;
rc = server_session->ReceiveRequest(&context, manager);
task();
}
});
// If the session was closed, handle that.
if (rc == ResultSessionClosed) {
SessionClosed(server_session, manager);
// Finish.
return;
}
// TODO: handle other cases
ASSERT(rc == ResultSuccess);
// Perform the request.
Result service_rc = manager->CompleteSyncRequest(server_session, *context);
// Reply to the client.
rc = server_session->SendReplyHLE();
if (rc == ResultSessionClosed || service_rc == IPC::ERR_REMOTE_PROCESS_DEAD) {
SessionClosed(server_session, manager);
return;
}
// TODO: handle other cases
ASSERT(rc == ResultSuccess);
ASSERT(service_rc == ResultSuccess);
}
void ServiceThread::Impl::SessionClosed(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager) {
{
// Lock to get the set.
std::scoped_lock lk{m_session_mutex};
// Erase the session.
ASSERT(m_sessions.erase(server_session) == 1);
}
// Close our reference to the server session.
server_session->Close();
}
void ServiceThread::Impl::LoopProcess() {
Common::SetCurrentThreadName(m_service_name.c_str());
kernel.RegisterHostThread();
while (!m_shutdown_requested.load()) {
WaitAndProcessImpl();
}
}
void ServiceThread::Impl::QueueSyncRequest(KSession& session,
std::shared_ptr<HLERequestContext>&& context) {
void ServiceThread::Impl::RegisterServerSession(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager) {
// Open the server session.
server_session->Open();
{
std::unique_lock lock{queue_mutex};
// Lock to get the set.
std::scoped_lock lk{m_session_mutex};
auto* server_session{&session.GetServerSession()};
// Open a reference to the session to ensure it is not closes while the service request
// completes asynchronously.
server_session->Open();
requests.emplace([server_session, context{std::move(context)}]() {
// Close the reference.
SCOPE_EXIT({ server_session->Close(); });
// Complete the service request.
server_session->CompleteSyncRequest(*context);
});
// Insert the session and manager.
m_sessions[server_session] = manager;
}
condition.notify_one();
// Signal the wakeup event.
m_wakeup_event->Signal();
}
ServiceThread::Impl::~Impl() {
condition.notify_all();
for (auto& thread : threads) {
thread.request_stop();
thread.join();
// Shut down the processing thread.
m_shutdown_requested.store(true);
m_wakeup_event->Signal();
m_thread.join();
// Lock mutex.
m_session_mutex.lock();
// Close all remaining sessions.
for (const auto& [server_session, manager] : m_sessions) {
server_session->Close();
}
// Destroy remaining managers.
m_sessions.clear();
// Close event.
m_wakeup_event->GetReadableEvent().Close();
m_wakeup_event->Close();
// Close process.
m_process->Close();
}
ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
: impl{std::make_unique<Impl>(kernel, num_threads, name)} {}
ServiceThread::Impl::Impl(KernelCore& kernel_, const std::string& service_name)
: kernel{kernel_}, m_service_name{service_name} {
// Initialize process.
m_process = KProcess::Create(kernel);
KProcess::Initialize(m_process, kernel.System(), service_name,
KProcess::ProcessType::KernelInternal, kernel.GetSystemResourceLimit());
// Reserve a new event from the process resource limit
KScopedResourceReservation event_reservation(m_process, LimitableResource::Events);
ASSERT(event_reservation.Succeeded());
// Initialize event.
m_wakeup_event = KEvent::Create(kernel);
m_wakeup_event->Initialize(m_process);
// Commit the event reservation.
event_reservation.Commit();
// Register the event.
KEvent::Register(kernel, m_wakeup_event);
// Start thread.
m_thread = std::jthread([this] { LoopProcess(); });
}
ServiceThread::ServiceThread(KernelCore& kernel, const std::string& name)
: impl{std::make_unique<Impl>(kernel, name)} {}
ServiceThread::~ServiceThread() = default;
void ServiceThread::QueueSyncRequest(KSession& session,
std::shared_ptr<HLERequestContext>&& context) {
impl->QueueSyncRequest(session, std::move(context));
void ServiceThread::RegisterServerSession(KServerSession* session,
std::shared_ptr<SessionRequestManager> manager) {
impl->RegisterServerSession(session, manager);
}
} // namespace Kernel
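The rewritten Impl replaces the old work queue with a wait-multiplexing loop: snapshot the session map under a mutex, wait on every session plus a wakeup event, and rebuild the snapshot whenever the event fires. A minimal std-only model of that shape (condition_variable standing in for KEvent, ints for sessions):

#include <condition_variable>
#include <mutex>
#include <set>
#include <stop_token>

class Waiter {
public:
    void RegisterSession(int id) {
        {
            std::scoped_lock lk{mutex};
            sessions.insert(id);
        }
        wakeup.notify_one(); // m_wakeup_event->Signal() in the real code
    }

    void LoopProcess(std::stop_token stop) {
        std::unique_lock lk{mutex};
        while (!stop.stop_requested()) {
            const std::set<int> snapshot = sessions; // local objs/managers copy
            // Sleep until the set changes under us (or shutdown is requested).
            wakeup.wait(lk, stop, [&] { return sessions != snapshot; });
            // ...the real loop would now wait on and service each session...
        }
    }

private:
    std::mutex mutex;
    std::condition_variable_any wakeup;
    std::set<int> sessions;
};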


@@ -11,13 +11,15 @@ namespace Kernel {
class HLERequestContext;
class KernelCore;
class KSession;
class SessionRequestManager;
class ServiceThread final {
public:
explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
explicit ServiceThread(KernelCore& kernel, const std::string& name);
~ServiceThread();
void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
void RegisterServerSession(KServerSession* session,
std::shared_ptr<SessionRequestManager> manager);
private:
class Impl;


@@ -52,6 +52,84 @@ public:
}
};
template <typename Derived, typename Base>
class KAutoObjectWithSlabHeap : public Base {
static_assert(std::is_base_of<KAutoObject, Base>::value);
private:
static Derived* Allocate(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().Allocate(kernel);
}
static void Free(KernelCore& kernel, Derived* obj) {
kernel.SlabHeap<Derived>().Free(obj);
}
public:
explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
virtual ~KAutoObjectWithSlabHeap() = default;
virtual void Destroy() override {
const bool is_initialized = this->IsInitialized();
uintptr_t arg = 0;
if (is_initialized) {
arg = this->GetPostDestroyArgument();
this->Finalize();
}
Free(kernel, static_cast<Derived*>(this));
if (is_initialized) {
Derived::PostDestroy(arg);
}
}
virtual bool IsInitialized() const {
return true;
}
virtual uintptr_t GetPostDestroyArgument() const {
return 0;
}
size_t GetSlabIndex() const {
return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
}
public:
static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) {
kernel.SlabHeap<Derived>().Initialize(memory, memory_size);
}
static Derived* Create(KernelCore& kernel) {
Derived* obj = Allocate(kernel);
if (obj != nullptr) {
KAutoObject::Create(obj);
}
return obj;
}
static size_t GetObjectSize(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetObjectSize();
}
static size_t GetSlabHeapSize(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetSlabHeapSize();
}
static size_t GetPeakIndex(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetPeakIndex();
}
static uintptr_t GetSlabHeapAddress(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetSlabHeapAddress();
}
static size_t GetNumRemaining(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetNumRemaining();
}
protected:
KernelCore& kernel;
};
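Destroy() above reads GetPostDestroyArgument() before Free(), because the object's storage goes back to the slab first and PostDestroy must not dereference it. A toy restatement of that ordering (plain new/delete instead of the slab heap):

#include <cstdint>
#include <cstdio>

struct Obj {
    std::uintptr_t owner_id = 42;
    static void PostDestroy(std::uintptr_t arg) {
        // Runs after the storage has been recycled, so it may only use
        // the argument copied out beforehand.
        std::printf("post-destroy for owner %ju\n", static_cast<std::uintmax_t>(arg));
    }
};

void Destroy(Obj* obj) {
    const std::uintptr_t arg = obj->owner_id; // GetPostDestroyArgument()
    delete obj;                               // Free() returns memory to the slab
    Obj::PostDestroy(arg);                    // safe: never touches freed memory
}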
template <typename Derived, typename Base>
class KAutoObjectWithSlabHeapAndContainer : public Base {
static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);


@@ -24,6 +24,7 @@
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -382,9 +383,9 @@ static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_n
// Create a session.
KClientSession* session{};
R_TRY(port->CreateSession(std::addressof(session),
std::make_shared<SessionRequestManager>(kernel)));
port->Close();
R_TRY(port->CreateSession(std::addressof(session)));
kernel.RegisterNamedServiceHandler(port_name, &port->GetParent()->GetServerPort());
// Register the session in the table, close the extra reference.
handle_table.Register(*out, session);
@@ -2246,7 +2247,7 @@ static u64 GetSystemTick(Core::System& system) {
auto& core_timing = system.CoreTiming();
// Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
const u64 result{system.CoreTiming().GetClockTicks()};
const u64 result{core_timing.GetClockTicks()};
if (!system.Kernel().IsMulticore()) {
core_timing.AddTicks(400U);


@@ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259};
constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
} // namespace Kernel


@@ -22,8 +22,8 @@ enum class MemoryState : u32 {
Ipc = 0x0A,
Stack = 0x0B,
ThreadLocal = 0x0C,
Transferred = 0x0D,
SharedTransferred = 0x0E,
Transfered = 0x0D,
SharedTransfered = 0x0E,
SharedCode = 0x0F,
Inaccessible = 0x10,
NonSecureIpc = 0x11,
@@ -32,6 +32,7 @@ enum class MemoryState : u32 {
GeneratedCode = 0x14,
CodeOut = 0x15,
Coverage = 0x16,
Insecure = 0x17,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);
@@ -83,6 +84,13 @@ enum class YieldType : s64 {
ToAnyThread = -2,
};
enum class ThreadExitReason : u32 {
ExitThread = 0,
TerminateThread = 1,
ExitProcess = 2,
TerminateProcess = 3,
};
enum class ThreadActivity : u32 {
Runnable = 0,
Paused = 1,
@@ -108,6 +116,34 @@ enum class ProcessState : u32 {
DebugBreak = 7,
};
enum class ProcessExitReason : u32 {
ExitProcess = 0,
TerminateProcess = 1,
Exception = 2,
};
constexpr inline size_t ThreadLocalRegionSize = 0x200;
// Debug types.
enum class DebugEvent : u32 {
CreateProcess = 0,
CreateThread = 1,
ExitProcess = 2,
ExitThread = 3,
Exception = 4,
};
enum class DebugException : u32 {
UndefinedInstruction = 0,
InstructionAbort = 1,
DataAbort = 2,
AlignmentFault = 3,
DebuggerAttached = 4,
BreakPoint = 5,
UserBreak = 6,
DebuggerBreak = 7,
UndefinedSystemCall = 8,
MemorySystemError = 9,
};
} // namespace Kernel::Svc


@@ -423,16 +423,17 @@ constexpr void UpdateCurrentResultReference<const Result>(Result result_referenc
} // namespace ResultImpl
#define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \
[[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE = \
[[maybe_unused]] constexpr bool CONCAT2(HasPrevRef_, COUNTER_VALUE) = \
std::same_as<decltype(__TmpCurrentResultReference), Result&>; \
[[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference; \
[[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess; \
Result& __TmpCurrentResultReference = \
HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE
[[maybe_unused]] Result CONCAT2(PrevRef_, COUNTER_VALUE) = __TmpCurrentResultReference; \
[[maybe_unused]] Result CONCAT2(__tmp_result_, COUNTER_VALUE) = ResultSuccess; \
Result& __TmpCurrentResultReference = CONCAT2(HasPrevRef_, COUNTER_VALUE) \
? CONCAT2(PrevRef_, COUNTER_VALUE) \
: CONCAT2(__tmp_result_, COUNTER_VALUE)
#define ON_RESULT_RETURN_IMPL(...) \
static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \
auto RESULT_GUARD_STATE_##__COUNTER__ = \
auto CONCAT2(RESULT_GUARD_STATE_, __COUNTER__) = \
ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \
__TmpCurrentResultReference) + \
[&]()
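The switch to CONCAT2 matters because ## pastes its operands before macro arguments such as __COUNTER__ (or COUNTER_VALUE above) are expanded; a two-level helper forces the expansion first. A self-contained demonstration (CONCAT2 assumed to match yuzu's usual two-level definition):

#define CONCAT2_IMPL(a, b) a##b
#define CONCAT2(a, b) CONCAT2_IMPL(a, b)

// Direct pasting yields the literal identifier 'guard___COUNTER__':
#define BAD_GUARD int guard_##__COUNTER__
// Indirect pasting expands __COUNTER__ first: 'guard_0', 'guard_1', ...
#define GOOD_GUARD int CONCAT2(guard_, __COUNTER__)

GOOD_GUARD; // declares e.g. 'int guard_0;'
GOOD_GUARD; // declares e.g. 'int guard_1;'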


@@ -512,10 +512,11 @@ protected:
class IManagerForApplication final : public ServiceFramework<IManagerForApplication> {
public:
explicit IManagerForApplication(Core::System& system_, Common::UUID user_id_)
explicit IManagerForApplication(Core::System& system_,
const std::shared_ptr<ProfileManager>& profile_manager_)
: ServiceFramework{system_, "IManagerForApplication"},
ensure_token_id{std::make_shared<EnsureTokenIdCacheAsyncInterface>(system)},
user_id{user_id_} {
profile_manager{profile_manager_} {
// clang-format off
static const FunctionInfo functions[] = {
{0, &IManagerForApplication::CheckAvailability, "CheckAvailability"},
@@ -545,7 +546,7 @@ private:
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(ResultSuccess);
rb.PushRaw<u64>(user_id.Hash());
rb.PushRaw<u64>(profile_manager->GetLastOpenedUser().Hash());
}
void EnsureIdTokenCacheAsync(Kernel::HLERequestContext& ctx) {
@@ -575,17 +576,20 @@ private:
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(ResultSuccess);
rb.PushRaw<u64>(user_id.Hash());
rb.PushRaw<u64>(profile_manager->GetLastOpenedUser().Hash());
}
void StoreOpenContext(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
LOG_DEBUG(Service_ACC, "called");
profile_manager->StoreOpenedUsers();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
std::shared_ptr<EnsureTokenIdCacheAsyncInterface> ensure_token_id{};
Common::UUID user_id{};
std::shared_ptr<ProfileManager> profile_manager;
};
// 6.0.0+
@@ -790,7 +794,7 @@ void Module::Interface::GetBaasAccountManagerForApplication(Kernel::HLERequestCo
LOG_DEBUG(Service_ACC, "called");
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
rb.PushIpcInterface<IManagerForApplication>(system, profile_manager->GetLastOpenedUser());
rb.PushIpcInterface<IManagerForApplication>(system, profile_manager);
}
void Module::Interface::IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx) {
@@ -849,22 +853,10 @@ void Module::Interface::ListQualifiedUsers(Kernel::HLERequestContext& ctx) {
rb.Push(ResultSuccess);
}
void Module::Interface::LoadOpenContext(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
// This is similar to GetBaasAccountManagerForApplication
// This command is used concurrently with ListOpenContextStoredUsers
// TODO: Find the differences between this and GetBaasAccountManagerForApplication
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
rb.PushIpcInterface<IManagerForApplication>(system, profile_manager->GetLastOpenedUser());
}
void Module::Interface::ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
LOG_DEBUG(Service_ACC, "called");
// TODO(ogniK): Handle open contexts
ctx.WriteBuffer(profile_manager->GetOpenUsers());
ctx.WriteBuffer(profile_manager->GetStoredOpenedUsers());
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}


@@ -35,7 +35,6 @@ public:
void InitializeApplicationInfoV2(Kernel::HLERequestContext& ctx);
void GetProfileEditor(Kernel::HLERequestContext& ctx);
void ListQualifiedUsers(Kernel::HLERequestContext& ctx);
void LoadOpenContext(Kernel::HLERequestContext& ctx);
void ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx);
void StoreSaveDataThumbnailApplication(Kernel::HLERequestContext& ctx);
void StoreSaveDataThumbnailSystem(Kernel::HLERequestContext& ctx);


@@ -28,7 +28,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module_, std::shared_ptr<ProfileManager>
{110, &ACC_U0::StoreSaveDataThumbnailApplication, "StoreSaveDataThumbnail"},
{111, nullptr, "ClearSaveDataThumbnail"},
{120, nullptr, "CreateGuestLoginRequest"},
{130, &ACC_U0::LoadOpenContext, "LoadOpenContext"}, // 5.0.0+
{130, nullptr, "LoadOpenContext"}, // 5.0.0+
{131, &ACC_U0::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 6.0.0+
{140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"}, // 6.0.0+
{141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+


@@ -261,6 +261,31 @@ UUID ProfileManager::GetLastOpenedUser() const {
return last_opened_user;
}
/// Gets the list of stored opened users.
UserIDArray ProfileManager::GetStoredOpenedUsers() const {
UserIDArray output{};
std::ranges::transform(stored_opened_profiles, output.begin(), [](const ProfileInfo& p) {
if (p.is_open)
return p.user_uuid;
return Common::InvalidUUID;
});
std::stable_partition(output.begin(), output.end(),
[](const UUID& uuid) { return uuid.IsValid(); });
return output;
}
/// Captures the opened users, which can be queried across process launches with
/// ListOpenContextStoredUsers.
void ProfileManager::StoreOpenedUsers() {
size_t profile_index{};
stored_opened_profiles = {};
std::for_each(profiles.begin(), profiles.end(), [&](const auto& profile) {
if (profile.is_open) {
stored_opened_profiles[profile_index++] = profile;
}
});
}
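GetStoredOpenedUsers above maps closed slots to InvalidUUID and then compacts the valid IDs to the front with std::stable_partition, preserving their relative order. A runnable stand-in with ints in place of UUIDs (0 playing the invalid value):

#include <algorithm>
#include <array>
#include <cstdio>

int main() {
    std::array<int, 5> ids{0, 7, 0, 3, 0};
    // Keep non-zero ("valid") entries first without reordering them.
    std::stable_partition(ids.begin(), ids.end(),
                          [](int id) { return id != 0; });
    for (const int id : ids) {
        std::printf("%d ", id); // prints: 7 3 0 0 0
    }
}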
/// Returns the user's profile base and the unknown arbitrary data.
bool ProfileManager::GetProfileBaseAndData(std::optional<std::size_t> index, ProfileBase& profile,
UserData& data) const {


@@ -86,6 +86,8 @@ public:
UserIDArray GetOpenUsers() const;
UserIDArray GetAllUsers() const;
Common::UUID GetLastOpenedUser() const;
UserIDArray GetStoredOpenedUsers() const;
void StoreOpenedUsers();
bool CanSystemRegisterUser() const;
@@ -101,6 +103,7 @@ private:
bool RemoveProfileAtIndex(std::size_t index);
std::array<ProfileInfo, MAX_USERS> profiles{};
std::array<ProfileInfo, MAX_USERS> stored_opened_profiles{};
std::size_t user_count{};
Common::UUID last_opened_user{};
};


@@ -299,7 +299,7 @@ ISelfController::ISelfController(Core::System& system_, NVFlinger::NVFlinger& nv
{100, &ISelfController::SetAlbumImageTakenNotificationEnabled, "SetAlbumImageTakenNotificationEnabled"},
{110, nullptr, "SetApplicationAlbumUserData"},
{120, &ISelfController::SaveCurrentScreenshot, "SaveCurrentScreenshot"},
{130, nullptr, "SetRecordVolumeMuted"},
{130, &ISelfController::SetRecordVolumeMuted, "SetRecordVolumeMuted"},
{1000, nullptr, "GetDebugStorageChannel"},
};
// clang-format on
@@ -597,6 +597,17 @@ void ISelfController::SaveCurrentScreenshot(Kernel::HLERequestContext& ctx) {
rb.Push(ResultSuccess);
}
void ISelfController::SetRecordVolumeMuted(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto is_record_volume_muted = rp.Pop<bool>();
LOG_WARNING(Service_AM, "(STUBBED) called. is_record_volume_muted={}", is_record_volume_muted);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
AppletMessageQueue::AppletMessageQueue(Core::System& system)
: service_context{system, "AppletMessageQueue"} {
on_new_message = service_context.CreateEvent("AMMessageQueue:OnMessageReceived");


@@ -182,6 +182,7 @@ private:
void GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx);
void SetAlbumImageTakenNotificationEnabled(Kernel::HLERequestContext& ctx);
void SaveCurrentScreenshot(Kernel::HLERequestContext& ctx);
void SetRecordVolumeMuted(Kernel::HLERequestContext& ctx);
enum class ScreenshotPermission : u32 {
Inherit = 0,


@@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
return result;
}
bool is_out_io{};
ASSERT(system.CurrentProcess()
->PageTable()
.LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
Kernel::KMemoryPermission::None, true)
.LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
handle_description->size,
Kernel::KMemoryPermission::None, true, false)
.IsSuccess());
std::memcpy(output.data(), &params, sizeof(params));
return result;


@@ -99,6 +99,12 @@ ServiceFrameworkBase::ServiceFrameworkBase(Core::System& system_, const char* se
ServiceFrameworkBase::~ServiceFrameworkBase() {
// Wait for other threads to release access before destroying
const auto guard = LockService();
if (named_port != nullptr) {
named_port->GetClientPort().Close();
named_port->GetServerPort().Close();
named_port = nullptr;
}
}
void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager) {
@@ -113,15 +119,16 @@ void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager)
Kernel::KClientPort& ServiceFrameworkBase::CreatePort() {
const auto guard = LockService();
ASSERT(!service_registered);
if (named_port == nullptr) {
ASSERT(!service_registered);
auto* port = Kernel::KPort::Create(kernel);
port->Initialize(max_sessions, false, service_name);
port->GetServerPort().SetSessionHandler(shared_from_this());
named_port = Kernel::KPort::Create(kernel);
named_port->Initialize(max_sessions, false, service_name);
service_registered = true;
service_registered = true;
}
return port->GetClientPort();
return named_port->GetClientPort();
}
void ServiceFrameworkBase::RegisterHandlersBase(const FunctionInfoBase* functions, std::size_t n) {
@@ -199,7 +206,6 @@ Result ServiceFrameworkBase::HandleSyncRequest(Kernel::KServerSession& session,
switch (ctx.GetCommandType()) {
case IPC::CommandType::Close:
case IPC::CommandType::TIPC_Close: {
session.Close();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
result = IPC::ERR_REMOTE_PROCESS_DEAD;
@@ -244,6 +250,7 @@ Services::Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system
system.GetFileSystemController().CreateFactories(*system.GetFilesystem(), false);
system.Kernel().RegisterNamedService("sm:", SM::ServiceManager::InterfaceFactory);
system.Kernel().RegisterInterfaceForNamedService("sm:", SM::ServiceManager::SessionHandler);
Account::InstallInterfaces(system);
AM::InstallInterfaces(*sm, *nv_flinger, system);


@@ -20,6 +20,7 @@ class System;
namespace Kernel {
class HLERequestContext;
class KClientPort;
class KPort;
class KServerSession;
class ServiceThread;
} // namespace Kernel
@@ -98,6 +99,9 @@ protected:
/// Identifier string used to connect to the service.
std::string service_name;
/// Port used by ManageNamedPort.
Kernel::KPort* named_port{};
private:
template <typename T>
friend class ServiceFramework;


@@ -23,7 +23,13 @@ constexpr Result ERR_INVALID_NAME(ErrorModule::SM, 6);
constexpr Result ERR_SERVICE_NOT_REGISTERED(ErrorModule::SM, 7);
ServiceManager::ServiceManager(Kernel::KernelCore& kernel_) : kernel{kernel_} {}
ServiceManager::~ServiceManager() = default;
ServiceManager::~ServiceManager() {
for (auto& [name, port] : service_ports) {
port->GetClientPort().Close();
port->GetServerPort().Close();
}
}
void ServiceManager::InvokeControlRequest(Kernel::HLERequestContext& context) {
controller_interface->InvokeRequest(context);
@@ -43,6 +49,10 @@ Kernel::KClientPort& ServiceManager::InterfaceFactory(ServiceManager& self, Core
return self.sm_interface->CreatePort();
}
void ServiceManager::SessionHandler(ServiceManager& self, Kernel::KServerPort* server_port) {
self.sm_interface->AcceptSession(server_port);
}
Result ServiceManager::RegisterService(std::string name, u32 max_sessions,
Kernel::SessionRequestHandlerPtr handler) {
@@ -53,7 +63,11 @@ Result ServiceManager::RegisterService(std::string name, u32 max_sessions,
return ERR_ALREADY_REGISTERED;
}
registered_services.emplace(std::move(name), handler);
auto* port = Kernel::KPort::Create(kernel);
port->Initialize(ServerSessionCountMax, false, name);
service_ports.emplace(name, port);
registered_services.emplace(name, handler);
return ResultSuccess;
}
@@ -68,24 +82,20 @@ Result ServiceManager::UnregisterService(const std::string& name) {
}
registered_services.erase(iter);
service_ports.erase(name);
return ResultSuccess;
}
ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name) {
CASCADE_CODE(ValidateServiceName(name));
auto it = registered_services.find(name);
if (it == registered_services.end()) {
auto it = service_ports.find(name);
if (it == service_ports.end()) {
LOG_ERROR(Service_SM, "Server is not registered! service={}", name);
return ERR_SERVICE_NOT_REGISTERED;
}
auto* port = Kernel::KPort::Create(kernel);
port->Initialize(ServerSessionCountMax, false, name);
auto handler = it->second;
port->GetServerPort().SetSessionHandler(std::move(handler));
return port;
return it->second;
}
/**
@@ -144,23 +154,20 @@ ResultVal<Kernel::KClientSession*> SM::GetServiceImpl(Kernel::HLERequestContext&
// Find the named port.
auto port_result = service_manager.GetServicePort(name);
if (port_result.Failed()) {
auto service = service_manager.GetService<Kernel::SessionRequestHandler>(name);
if (port_result.Failed() || !service) {
LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, port_result.Code().raw);
return port_result.Code();
}
auto& port = port_result.Unwrap();
SCOPE_EXIT({ port->GetClientPort().Close(); });
kernel.RegisterServerObject(&port->GetServerPort());
// Create a new session.
Kernel::KClientSession* session{};
if (const auto result = port->GetClientPort().CreateSession(
std::addressof(session), std::make_shared<Kernel::SessionRequestManager>(kernel));
result.IsError()) {
if (const auto result = port->GetClientPort().CreateSession(&session); result.IsError()) {
LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw);
return result;
}
service->AcceptSession(&port->GetServerPort());
LOG_DEBUG(Service_SM, "called service={} -> session={}", name, session->GetId());


@@ -51,6 +51,7 @@ private:
class ServiceManager {
public:
static Kernel::KClientPort& InterfaceFactory(ServiceManager& self, Core::System& system);
static void SessionHandler(ServiceManager& self, Kernel::KServerPort* server_port);
explicit ServiceManager(Kernel::KernelCore& kernel_);
~ServiceManager();
@@ -78,6 +79,7 @@ private:
/// Map of registered services, retrieved using GetServicePort.
std::unordered_map<std::string, Kernel::SessionRequestHandlerPtr> registered_services;
std::unordered_map<std::string, Kernel::KPort*> service_ports;
/// Kernel context
Kernel::KernelCore& kernel;


@@ -15,10 +15,9 @@
namespace Service::SM {
void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) {
ASSERT_MSG(!ctx.Session()->GetSessionRequestManager()->IsDomain(),
"Session is already a domain");
ASSERT_MSG(!ctx.GetManager()->IsDomain(), "Session is already a domain");
LOG_DEBUG(Service, "called, server_session={}", ctx.Session()->GetId());
ctx.Session()->GetSessionRequestManager()->ConvertToDomainOnRequestEnd();
ctx.GetManager()->ConvertToDomainOnRequestEnd();
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(ResultSuccess);
@@ -28,23 +27,35 @@ void Controller::ConvertCurrentObjectToDomain(Kernel::HLERequestContext& ctx) {
void Controller::CloneCurrentObject(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service, "called");
auto& parent_session = *ctx.Session()->GetParent();
auto& parent_port = parent_session.GetParent()->GetParent()->GetClientPort();
auto& session_manager = parent_session.GetServerSession().GetSessionRequestManager();
auto& process = *ctx.GetThread().GetOwnerProcess();
auto session_manager = ctx.GetManager();
// Create a session.
Kernel::KClientSession* session{};
const Result result = parent_port.CreateSession(std::addressof(session), session_manager);
if (result.IsError()) {
LOG_CRITICAL(Service, "CreateSession failed with error 0x{:08X}", result.raw);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(result);
}
// FIXME: this is duplicated from the SVC, it should just call it instead
// once this is a proper process
// Reserve a new session from the process resource limit.
Kernel::KScopedResourceReservation session_reservation(&process,
Kernel::LimitableResource::Sessions);
ASSERT(session_reservation.Succeeded());
// Create the session.
Kernel::KSession* session = Kernel::KSession::Create(system.Kernel());
ASSERT(session != nullptr);
// Initialize the session.
session->Initialize(nullptr, "");
// Commit the session reservation.
session_reservation.Commit();
// Register with manager.
session_manager->SessionHandler().RegisterSession(&session->GetServerSession(),
session_manager);
// We succeeded.
IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
rb.Push(ResultSuccess);
rb.PushMoveObjects(session);
rb.PushMoveObjects(session->GetClientSession());
}
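CloneCurrentObject now follows the kernel's reserve/create/commit idiom: take a scoped reservation against the resource limit, build the object, then Commit() so the destructor does not roll the resource back. A minimal std-only model of such a scoped reservation (not yuzu's KScopedResourceReservation):

#include <cstdio>

class ScopedReservation {
public:
    explicit ScopedReservation(int& pool) : pool_{pool}, ok_{pool > 0} {
        if (ok_) --pool_; // reserve one slot up front
    }
    ~ScopedReservation() {
        if (ok_ && !committed_) ++pool_; // roll back if never committed
    }
    bool Succeeded() const { return ok_; }
    void Commit() { committed_ = true; }

private:
    int& pool_;
    bool ok_;
    bool committed_ = false;
};

int main() {
    int session_slots = 1;
    ScopedReservation reservation{session_slots};
    if (reservation.Succeeded()) {
        // ... create and initialize the session ...
        reservation.Commit(); // keep the slot now that creation succeeded
    }
    std::printf("slots left: %d\n", session_slots); // 0: commit kept the slot
}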
void Controller::CloneCurrentObjectEx(Kernel::HLERequestContext& ctx) {


@@ -11,6 +11,10 @@
#include "core/internal_network/network_interface.h"
#include "core/internal_network/socket_proxy.h"
#if YUZU_UNIX
#include <sys/socket.h>
#endif
namespace Network {
ProxySocket::ProxySocket(RoomNetwork& room_network_) noexcept : room_network{room_network_} {}


@@ -224,6 +224,7 @@ add_library(shader_recompiler STATIC
ir_opt/lower_fp16_to_fp32.cpp
ir_opt/lower_int64_to_int32.cpp
ir_opt/passes.h
ir_opt/position_pass.cpp
ir_opt/rescaling_pass.cpp
ir_opt/ssa_rewrite_pass.cpp
ir_opt/texture_pass.cpp


@@ -450,6 +450,9 @@ std::string EmitGLASM(const Profile& profile, const RuntimeInfo& runtime_info, I
if (program.info.uses_rescaling_uniform) {
header += "PARAM scaling[1]={program.local[0..0]};";
}
if (program.info.uses_render_area) {
header += "PARAM render_area[1]={program.local[1..1]};";
}
header += "TEMP ";
for (size_t index = 0; index < ctx.reg_alloc.NumUsedRegisters(); ++index) {
header += fmt::format("R{},", index);


@@ -43,6 +43,10 @@ void EmitBitCastU64F64(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitBitCastS32F32(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitBitCastF16U16(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}


@@ -396,6 +396,10 @@ void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.F {}.x,scaling[0].z;", inst);
}
void EmitRenderArea(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.F {},render_area[0];", inst);
}
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset) {
ctx.Add("MOV.U {},lmem[{}].x;", inst, word_offset);
}


@@ -73,6 +73,7 @@ void EmitSampleId(EmitContext& ctx, IR::Inst& inst);
void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst);
void EmitYDirection(EmitContext& ctx, IR::Inst& inst);
void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst);
void EmitRenderArea(EmitContext& ctx, IR::Inst& inst);
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset);
void EmitWriteLocal(EmitContext& ctx, ScalarU32 word_offset, ScalarU32 value);
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst);
@@ -195,6 +196,7 @@ void EmitSelectF64(EmitContext& ctx, ScalarS32 cond, Register true_value, Regist
void EmitBitCastU16F16(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastS32F32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastF16U16(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);


@@ -48,6 +48,10 @@ void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value)
ctx.AddU64("{}=doubleBitsToUint64({});", inst, value);
}
void EmitBitCastS32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=ftoi({});", inst, value);
}
void EmitBitCastF16U16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst) {
NotImplemented();
}


@@ -416,6 +416,10 @@ void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst) {
ctx.AddF32("{}=scaling.z;", inst);
}
void EmitRenderArea(EmitContext& ctx, IR::Inst& inst) {
ctx.AddF32x4("{}=render_area;", inst);
}
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset) {
ctx.AddU32("{}=lmem[{}];", inst, word_offset);
}


@@ -87,6 +87,7 @@ void EmitSampleId(EmitContext& ctx, IR::Inst& inst);
void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst);
void EmitYDirection(EmitContext& ctx, IR::Inst& inst);
void EmitResolutionDownFactor(EmitContext& ctx, IR::Inst& inst);
void EmitRenderArea(EmitContext& ctx, IR::Inst& inst);
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset);
void EmitWriteLocal(EmitContext& ctx, std::string_view word_offset, std::string_view value);
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst);
@@ -229,6 +230,7 @@ void EmitSelectF64(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
void EmitBitCastU16F16(EmitContext& ctx, IR::Inst& inst);
void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitCastS32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitCastF16U16(EmitContext& ctx, IR::Inst& inst);
void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, std::string_view value);


@@ -358,6 +358,9 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
if (info.uses_rescaling_uniform) {
header += "layout(location=0) uniform vec4 scaling;";
}
if (info.uses_render_area) {
header += "layout(location=1) uniform vec4 render_area;";
}
DefineConstantBuffers(bindings);
DefineConstantBufferIndirect();
DefineStorageBuffers(bindings);


@@ -23,8 +23,12 @@ struct RescalingLayout {
alignas(16) std::array<u32, NUM_IMAGE_SCALING_WORDS> rescaling_images;
u32 down_factor;
};
struct RenderAreaLayout {
std::array<f32, 4> render_area;
};
constexpr u32 RESCALING_LAYOUT_WORDS_OFFSET = offsetof(RescalingLayout, rescaling_textures);
constexpr u32 RESCALING_LAYOUT_DOWN_FACTOR_OFFSET = offsetof(RescalingLayout, down_factor);
constexpr u32 RENDERAREA_LAYOUT_OFFSET = offsetof(RenderAreaLayout, render_area);
[[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& bindings);


@@ -18,6 +18,10 @@ void EmitBitCastU64F64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitBitCastS32F32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitBitCastF16U16(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}


@@ -353,7 +353,6 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
case IR::Attribute::TessellationEvaluationPointV:
return ctx.OpLoad(ctx.F32[1],
ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.Const(1U)));
default:
throw NotImplementedException("Read attribute {}", attr);
}
@@ -537,6 +536,17 @@ Id EmitResolutionDownFactor(EmitContext& ctx) {
}
}
Id EmitRenderArea(EmitContext& ctx) {
if (ctx.profile.unified_descriptor_binding) {
const Id pointer_type{ctx.TypePointer(spv::StorageClass::PushConstant, ctx.F32[4])};
const Id index{ctx.Const(ctx.render_are_member_index)};
const Id pointer{ctx.OpAccessChain(pointer_type, ctx.render_area_push_constant, index)};
return ctx.OpLoad(ctx.F32[4], pointer);
} else {
throw NotImplementedException("SPIR-V Instruction");
}
}
Id EmitLoadLocal(EmitContext& ctx, Id word_offset) {
const Id pointer{ctx.OpAccessChain(ctx.private_u32, ctx.local_memory, word_offset)};
return ctx.OpLoad(ctx.U32[1], pointer);


@@ -76,6 +76,7 @@ Id EmitSampleId(EmitContext& ctx);
Id EmitIsHelperInvocation(EmitContext& ctx);
Id EmitYDirection(EmitContext& ctx);
Id EmitResolutionDownFactor(EmitContext& ctx);
Id EmitRenderArea(EmitContext& ctx);
Id EmitLoadLocal(EmitContext& ctx, Id word_offset);
void EmitWriteLocal(EmitContext& ctx, Id word_offset, Id value);
Id EmitUndefU1(EmitContext& ctx);
@@ -177,7 +178,8 @@ Id EmitSelectF64(EmitContext& ctx, Id cond, Id true_value, Id false_value);
void EmitBitCastU16F16(EmitContext& ctx);
Id EmitBitCastU32F32(EmitContext& ctx, Id value);
void EmitBitCastU64F64(EmitContext& ctx);
void EmitBitCastF16U16(EmitContext& ctx);
void EmitBitCastS32F32(EmitContext& ctx);
void EmitBitCastF16U16(EmitContext&);
Id EmitBitCastF32U32(EmitContext& ctx, Id value);
void EmitBitCastF64U64(EmitContext& ctx);
Id EmitPackUint2x32(EmitContext& ctx, Id value);


@@ -473,6 +473,7 @@ EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_inf
DefineAttributeMemAccess(program.info);
DefineGlobalMemoryFunctions(program.info);
DefineRescalingInput(program.info);
DefineRenderArea(program.info);
}
EmitContext::~EmitContext() = default;
@@ -982,6 +983,36 @@ void EmitContext::DefineRescalingInputUniformConstant() {
}
}
void EmitContext::DefineRenderArea(const Info& info) {
if (!info.uses_render_area) {
return;
}
if (profile.unified_descriptor_binding) {
boost::container::static_vector<Id, 1> members{};
u32 member_index{0};
members.push_back(F32[4]);
render_are_member_index = member_index++;
const Id push_constant_struct{TypeStruct(std::span(members.data(), members.size()))};
Decorate(push_constant_struct, spv::Decoration::Block);
Name(push_constant_struct, "RenderAreaInfo");
MemberDecorate(push_constant_struct, render_are_member_index, spv::Decoration::Offset, 0);
MemberName(push_constant_struct, render_are_member_index, "render_area");
const Id pointer_type{TypePointer(spv::StorageClass::PushConstant, push_constant_struct)};
render_area_push_constant =
AddGlobalVariable(pointer_type, spv::StorageClass::PushConstant);
Name(render_area_push_constant, "render_area_push_constants");
if (profile.supported_spirv >= 0x00010400) {
interfaces.push_back(render_area_push_constant);
}
}
}
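DefineRenderArea above declares render_area as a push-constant block, so the host side can update it per draw rather than through a buffer binding. A hedged sketch of the matching host-side update (Vulkan; the pipeline layout, stage flags, and call site are assumptions, not code from this diff):

#include <vulkan/vulkan.h>

struct RenderAreaPushConstant {
    float render_area[4]; // vec4: width, height, padding
};

void PushRenderArea(VkCommandBuffer cmd, VkPipelineLayout layout,
                    float width, float height) {
    const RenderAreaPushConstant data{{width, height, 0.0f, 0.0f}};
    // Matches the PushConstant storage class declared in the shader.
    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_VERTEX_BIT,
                       /*offset=*/0, sizeof(data), &data);
}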
void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
if (info.constant_buffer_descriptors.empty()) {
return;


@@ -243,6 +243,9 @@ public:
u32 texture_rescaling_index{};
u32 image_rescaling_index{};
Id render_area_push_constant{};
u32 render_are_member_index{};
Id local_memory{};
Id shared_memory_u8{};
@@ -318,6 +321,7 @@ private:
void DefineRescalingInput(const Info& info);
void DefineRescalingInputPushConstant();
void DefineRescalingInputUniformConstant();
void DefineRenderArea(const Info& info);
void DefineInputs(const IR::Program& program);
void DefineOutputs(const IR::Program& program);


@@ -22,6 +22,10 @@ public:
[[nodiscard]] virtual TextureType ReadTextureType(u32 raw_handle) = 0;
[[nodiscard]] virtual TexturePixelFormat ReadTexturePixelFormat(u32 raw_handle) = 0;
[[nodiscard]] virtual u32 ReadViewportTransformState() = 0;
[[nodiscard]] virtual u32 TextureBoundBuffer() const = 0;
[[nodiscard]] virtual u32 LocalMemorySize() const = 0;


@@ -378,6 +378,14 @@ F32 IREmitter::ResolutionDownFactor() {
return Inst<F32>(Opcode::ResolutionDownFactor);
}
F32 IREmitter::RenderAreaWidth() {
return F32(CompositeExtract(Inst<Value>(Opcode::RenderArea), 0));
}
F32 IREmitter::RenderAreaHeight() {
return F32(CompositeExtract(Inst<Value>(Opcode::RenderArea), 1));
}
U32 IREmitter::LaneId() {
return Inst<U32>(Opcode::LaneId);
}
@@ -683,6 +691,11 @@ IR::U32 IREmitter::BitCast<IR::U32, IR::F32>(const IR::F32& value) {
return Inst<IR::U32>(Opcode::BitCastU32F32, value);
}
template <>
IR::S32 IREmitter::BitCast<IR::S32, IR::F32>(const IR::F32& value) {
return Inst<IR::S32>(Opcode::BitCastS32F32, value);
}
template <>
IR::F32 IREmitter::BitCast<IR::F32, IR::U32>(const IR::U32& value) {
return Inst<IR::F32>(Opcode::BitCastF32U32, value);


@@ -103,6 +103,9 @@ public:
[[nodiscard]] F32 ResolutionDownFactor();
[[nodiscard]] F32 RenderAreaWidth();
[[nodiscard]] F32 RenderAreaHeight();
[[nodiscard]] U32 LaneId();
[[nodiscard]] U32 LoadGlobalU8(const U64& address);


@@ -37,6 +37,7 @@ constexpr Type U8{Type::U8};
constexpr Type U16{Type::U16};
constexpr Type U32{Type::U32};
constexpr Type U64{Type::U64};
constexpr Type S32{Type::S32};
constexpr Type F16{Type::F16};
constexpr Type F32{Type::F32};
constexpr Type F64{Type::F64};


@@ -63,6 +63,7 @@ OPCODE(SampleId, U32,
OPCODE(IsHelperInvocation, U1, )
OPCODE(YDirection, F32, )
OPCODE(ResolutionDownFactor, F32, )
OPCODE(RenderArea, F32x4, )
// Undefined
OPCODE(UndefU1, U1, )
@@ -173,6 +174,7 @@ OPCODE(SelectF64, F64, U1,
OPCODE(BitCastU16F16, U16, F16, )
OPCODE(BitCastU32F32, U32, F32, )
OPCODE(BitCastU64F64, U64, F64, )
OPCODE(BitCastS32F32, S32, F32, )
OPCODE(BitCastF16U16, F16, U16, )
OPCODE(BitCastF32U32, F32, U32, )
OPCODE(BitCastF64U64, F64, U64, )


@@ -24,21 +24,22 @@ enum class Type {
U16 = 1 << 7,
U32 = 1 << 8,
U64 = 1 << 9,
F16 = 1 << 10,
F32 = 1 << 11,
F64 = 1 << 12,
U32x2 = 1 << 13,
U32x3 = 1 << 14,
U32x4 = 1 << 15,
F16x2 = 1 << 16,
F16x3 = 1 << 17,
F16x4 = 1 << 18,
F32x2 = 1 << 19,
F32x3 = 1 << 20,
F32x4 = 1 << 21,
F64x2 = 1 << 22,
F64x3 = 1 << 23,
F64x4 = 1 << 24,
S32 = 1 << 10,
F16 = 1 << 11,
F32 = 1 << 12,
F64 = 1 << 13,
U32x2 = 1 << 14,
U32x3 = 1 << 15,
U32x4 = 1 << 16,
F16x2 = 1 << 17,
F16x3 = 1 << 18,
F16x4 = 1 << 19,
F32x2 = 1 << 20,
F32x3 = 1 << 21,
F32x4 = 1 << 22,
F64x2 = 1 << 23,
F64x3 = 1 << 24,
F64x4 = 1 << 25,
};
DECLARE_ENUM_FLAG_OPERATORS(Type)


@@ -23,6 +23,8 @@ Value::Value(u16 value) noexcept : type{Type::U16}, imm_u16{value} {}
Value::Value(u32 value) noexcept : type{Type::U32}, imm_u32{value} {}
Value::Value(s32 value) noexcept : type{Type::S32}, imm_s32{value} {}
Value::Value(f32 value) noexcept : type{Type::F32}, imm_f32{value} {}
Value::Value(u64 value) noexcept : type{Type::U64}, imm_u64{value} {}
@@ -69,6 +71,7 @@ bool Value::operator==(const Value& other) const {
return imm_u16 == other.imm_u16;
case Type::U32:
case Type::F32:
case Type::S32:
return imm_u32 == other.imm_u32;
case Type::U64:
case Type::F64:


@@ -44,6 +44,7 @@ public:
explicit Value(u8 value) noexcept;
explicit Value(u16 value) noexcept;
explicit Value(u32 value) noexcept;
explicit Value(s32 value) noexcept;
explicit Value(f32 value) noexcept;
explicit Value(u64 value) noexcept;
explicit Value(f64 value) noexcept;
@@ -66,6 +67,7 @@ public:
[[nodiscard]] u8 U8() const;
[[nodiscard]] u16 U16() const;
[[nodiscard]] u32 U32() const;
[[nodiscard]] s32 S32() const;
[[nodiscard]] f32 F32() const;
[[nodiscard]] u64 U64() const;
[[nodiscard]] f64 F64() const;
@@ -85,6 +87,7 @@ private:
u8 imm_u8;
u16 imm_u16;
u32 imm_u32;
s32 imm_s32;
f32 imm_f32;
u64 imm_u64;
f64 imm_f64;
@@ -266,6 +269,7 @@ using U8 = TypedValue<Type::U8>;
using U16 = TypedValue<Type::U16>;
using U32 = TypedValue<Type::U32>;
using U64 = TypedValue<Type::U64>;
using S32 = TypedValue<Type::S32>;
using F16 = TypedValue<Type::F16>;
using F32 = TypedValue<Type::F32>;
using F64 = TypedValue<Type::F64>;
@@ -377,6 +381,14 @@ inline u32 Value::U32() const {
return imm_u32;
}
inline s32 Value::S32() const {
if (IsIdentity()) {
return inst->Arg(0).S32();
}
DEBUG_ASSERT(type == Type::S32);
return imm_s32;
}
inline f32 Value::F32() const {
if (IsIdentity()) {
return inst->Arg(0).F32();


@@ -220,6 +220,8 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
Optimization::ConstantPropagationPass(program);
Optimization::PositionPass(env, program);
Optimization::GlobalMemoryToStorageBufferPass(program);
Optimization::TexturePass(env, program);


@@ -17,6 +17,7 @@ void LowerFp16ToFp32(IR::Program& program);
void LowerInt64ToInt32(IR::Program& program);
void RescalingPass(IR::Program& program);
void SsaRewritePass(IR::Program& program);
void PositionPass(Environment& env, IR::Program& program);
void TexturePass(Environment& env, IR::Program& program);
void VerificationPass(const IR::Program& program);


@@ -0,0 +1,77 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <boost/container/small_vector.hpp>
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/ir_emitter.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/ir_opt/passes.h"
namespace Shader::Optimization {
namespace {
struct PositionInst {
IR::Inst* inst;
IR::Block* block;
IR::Attribute attr;
};
using PositionInstVector = boost::container::small_vector<PositionInst, 24>;
} // Anonymous namespace
void PositionPass(Environment& env, IR::Program& program) {
if (env.ShaderStage() != Stage::VertexB || env.ReadViewportTransformState()) {
return;
}
Info& info{program.info};
info.uses_render_area = true;
PositionInstVector to_replace;
for (IR::Block* const block : program.post_order_blocks) {
for (IR::Inst& inst : block->Instructions()) {
switch (inst.GetOpcode()) {
case IR::Opcode::SetAttribute: {
const IR::Attribute attr{inst.Arg(0).Attribute()};
switch (attr) {
case IR::Attribute::PositionX:
case IR::Attribute::PositionY: {
to_replace.push_back(PositionInst{.inst = &inst, .block = block, .attr = attr});
break;
}
default:
break;
}
break;
}
default:
break;
}
}
}
for (PositionInst& position_inst : to_replace) {
IR::IREmitter ir{*position_inst.block,
IR::Block::InstructionList::s_iterator_to(*position_inst.inst)};
const IR::F32 value(position_inst.inst->Arg(1));
const IR::F32F64 scale(ir.Imm32(2.f));
const IR::F32 negative_one{ir.Imm32(-1.f)};
switch (position_inst.attr) {
case IR::Attribute::PositionX: {
position_inst.inst->SetArg(
1,
ir.FPFma(value, ir.FPMul(ir.FPRecip(ir.RenderAreaWidth()), scale), negative_one));
break;
}
case IR::Attribute::PositionY: {
position_inst.inst->SetArg(
1,
ir.FPFma(value, ir.FPMul(ir.FPRecip(ir.RenderAreaHeight()), scale), negative_one));
break;
}
default:
break;
}
}
}
} // namespace Shader::Optimization
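The FPFma rewrite above implements x' = x * (2 / dim) - 1, remapping window-space positions into the [-1, 1] clip range: with a 1280-wide render area, 0 maps to -1, 640 to 0, and 1280 to +1. The scalar equivalent:

// Scalar form of the pass's fma(value, 2 / render_dim, -1) rewrite.
float ToClipSpace(float pos, float render_dim) {
    return pos * (2.0f / render_dim) - 1.0f; // ToClipSpace(640.f, 1280.f) == 0.f
}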


@@ -7,6 +7,7 @@
#include <boost/container/small_vector.hpp>
#include "common/settings.h"
#include "shader_recompiler/environment.h"
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/breadth_first_search.h"
@@ -363,6 +364,14 @@ TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) {
return env.ReadTextureType(lhs_raw | rhs_raw);
}
TexturePixelFormat ReadTexturePixelFormat(Environment& env, const ConstBufferAddr& cbuf) {
const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index};
const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset};
const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)};
const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)};
return env.ReadTexturePixelFormat(lhs_raw | rhs_raw);
}
class Descriptors {
public:
explicit Descriptors(TextureBufferDescriptors& texture_buffer_descriptors_,
@@ -451,6 +460,38 @@ void PatchImageSampleImplicitLod(IR::Block& block, IR::Inst& inst) {
ir.FPMul(IR::F32(ir.CompositeExtract(coord, 1)),
ir.FPRecip(ir.ConvertUToF(32, 32, ir.CompositeExtract(texture_size, 1))))));
}
void PathTexelFetch(IR::Block& block, IR::Inst& inst, TexturePixelFormat pixel_format) {
const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
auto get_max_value = [pixel_format]() -> float {
switch (pixel_format) {
case TexturePixelFormat::A8B8G8R8_SNORM:
case TexturePixelFormat::R8G8_SNORM:
case TexturePixelFormat::R8_SNORM:
return 1.f / std::numeric_limits<char>::max();
case TexturePixelFormat::R16G16B16A16_SNORM:
case TexturePixelFormat::R16G16_SNORM:
case TexturePixelFormat::R16_SNORM:
return 1.f / std::numeric_limits<short>::max();
default:
throw InvalidArgument("Invalid texture pixel format");
}
};
const IR::Value new_inst{&*block.PrependNewInst(it, inst)};
const IR::F32 x(ir.CompositeExtract(new_inst, 0));
const IR::F32 y(ir.CompositeExtract(new_inst, 1));
const IR::F32 z(ir.CompositeExtract(new_inst, 2));
const IR::F32 w(ir.CompositeExtract(new_inst, 3));
const IR::F16F32F64 max_value(ir.Imm32(get_max_value()));
const IR::Value converted =
ir.CompositeConstruct(ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(x)), max_value),
ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(y)), max_value),
ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(z)), max_value),
ir.FPMul(ir.ConvertSToF(32, 32, ir.BitCast<IR::S32>(w)), max_value));
inst.ReplaceUsesWith(converted);
}
} // Anonymous namespace
void TexturePass(Environment& env, IR::Program& program) {
@@ -597,6 +638,14 @@ void TexturePass(Environment& env, IR::Program& program) {
} else {
inst->SetArg(0, IR::Value{});
}
if (Settings::values.renderer_backend.GetValue() == Settings::RendererBackend::OpenGL &&
inst->GetOpcode() == IR::Opcode::ImageFetch && flags.type == TextureType::Buffer) {
const auto pixel_format = ReadTexturePixelFormat(env, cbuf);
if (pixel_format != TexturePixelFormat::OTHER) {
PathTexelFetch(*texture_inst.block, *texture_inst.inst, pixel_format);
}
}
}
}
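PathTexelFetch compensates for the integer-format switch in the OpenGL backend (see the GL_*I mapping later in this diff): the raw signed texel is converted to float and multiplied by 1/127 or 1/32767 to recover SNORM semantics. The per-component operation, as a scalar:

#include <limits>

// E.g. an R8I view of R8_SNORM data: raw 127 -> 1.0f, raw -127 -> -1.0f.
float DecodeSnorm8(signed char raw) {
    return static_cast<float>(raw) *
           (1.0f / std::numeric_limits<signed char>::max());
}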


@@ -29,6 +29,16 @@ enum class TextureType : u32 {
};
constexpr u32 NUM_TEXTURE_TYPES = 9;
enum class TexturePixelFormat : u32 {
A8B8G8R8_SNORM,
R8_SNORM,
R8G8_SNORM,
R16G16B16A16_SNORM,
R16G16_SNORM,
R16_SNORM,
OTHER
};
enum class ImageFormat : u32 {
Typeless,
R8_UINT,
@@ -182,6 +192,7 @@ struct Info {
bool uses_shadow_lod{};
bool uses_rescaling_uniform{};
bool uses_cbuf_indirect{};
bool uses_render_area{};
IR::Type used_constant_buffer_types{};
IR::Type used_storage_buffer_types{};


@@ -631,47 +631,40 @@ void Maxwell3D::ProcessDeferredDraw() {
Instance,
};
DrawMode draw_mode{DrawMode::Undefined};
u32 instance_count = 1;
u32 index = 0;
u32 method = 0;
u32 method_count = static_cast<u32>(deferred_draw_method.size());
for (; index < method_count &&
(method = deferred_draw_method[index]) != MAXWELL3D_REG_INDEX(draw.begin);
++index)
;
if (MAXWELL3D_REG_INDEX(draw.begin) != method) {
return;
}
// The minimum number of methods for drawing must be greater than or equal to
// 3[draw.begin->vertex(index)count(first)->draw.end] to avoid errors in index mode drawing
if ((method_count - index) < 3) {
u32 method = deferred_draw_method[method_count - 1];
if (MAXWELL3D_REG_INDEX(draw.end) != method) {
return;
}
draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) ||
(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged)
? DrawMode::Instance
: DrawMode::General;
// Drawing will only begin with draw.begin or index_buffer method, other methods directly
// clear
if (draw_mode == DrawMode::Undefined) {
deferred_draw_method.clear();
return;
}
u32 instance_count = 0;
if (draw_mode == DrawMode::Instance) {
ASSERT_MSG(deferred_draw_method.size() % 4 == 0, "Instance mode method size error");
instance_count = static_cast<u32>(method_count - index) / 4;
u32 vertex_buffer_count = 0;
u32 index_buffer_count = 0;
for (u32 index = 0; index < method_count; ++index) {
method = deferred_draw_method[index];
if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count)) {
instance_count = ++vertex_buffer_count;
} else if (method == MAXWELL3D_REG_INDEX(index_buffer.count)) {
instance_count = ++index_buffer_count;
}
}
ASSERT_MSG(!(vertex_buffer_count && index_buffer_count),
"Instance both indexed and direct?");
} else {
method = deferred_draw_method[index + 1];
if (MAXWELL3D_REG_INDEX(draw_inline_index) == method ||
MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method ||
MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) {
regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4);
regs.index_buffer.format = Regs::IndexFormat::UnsignedInt;
instance_count = 1;
for (u32 index = 0; index < method_count; ++index) {
method = deferred_draw_method[index];
if (MAXWELL3D_REG_INDEX(draw_inline_index) == method ||
MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method ||
MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) {
regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4);
regs.index_buffer.format = Regs::IndexFormat::UnsignedInt;
break;
}
}
}
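The rewritten loop derives the instance count by counting how many times a vertex or index count register was written in the deferred method stream, instead of assuming a fixed four methods per instance. A toy version of that counting (the register values are hypothetical placeholders for MAXWELL3D_REG_INDEX):

#include <cstdint>
#include <vector>

constexpr std::uint32_t VERTEX_COUNT_REG = 0x10; // placeholder index
constexpr std::uint32_t INDEX_COUNT_REG = 0x20;  // placeholder index

std::uint32_t CountInstances(const std::vector<std::uint32_t>& methods) {
    std::uint32_t direct = 0;
    std::uint32_t indexed = 0;
    for (const std::uint32_t method : methods) {
        if (method == VERTEX_COUNT_REG) {
            ++direct; // one direct draw per vertex_buffer.count write
        } else if (method == INDEX_COUNT_REG) {
            ++indexed; // one indexed draw per index_buffer.count write
        }
    }
    // The real code asserts the two kinds are not mixed in one batch.
    return direct != 0 ? direct : indexed;
}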


@@ -45,7 +45,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
kind_valus.fill(PTEKind::INVALID);
big_kinds.resize(big_page_table_size / 32, kind_valus);
entries.resize(page_table_size / 32, 0);
kinds.resize(big_page_table_size / 32, kind_valus);
kinds.resize(page_table_size / 32, kind_valus);
}
MemoryManager::~MemoryManager() = default;


@@ -29,17 +29,17 @@ constexpr std::array PROGRAM_LUT{
[[nodiscard]] GLenum GetTextureBufferFormat(GLenum gl_format) {
switch (gl_format) {
case GL_RGBA8_SNORM:
return GL_RGBA8;
return GL_RGBA8I;
case GL_R8_SNORM:
return GL_R8;
return GL_R8I;
case GL_RGBA16_SNORM:
return GL_RGBA16;
return GL_RGBA16I;
case GL_R16_SNORM:
return GL_R16;
return GL_R16I;
case GL_RG16_SNORM:
return GL_RG16;
return GL_RG16I;
case GL_RG8_SNORM:
return GL_RG8;
return GL_RG8I;
default:
return gl_format;
}
@@ -96,9 +96,6 @@ GLuint Buffer::View(u32 offset, u32 size, PixelFormat format) {
texture.Create(GL_TEXTURE_BUFFER);
const GLenum gl_format{MaxwellToGL::GetFormatTuple(format).internal_format};
const GLenum texture_format{GetTextureBufferFormat(gl_format)};
if (texture_format != gl_format) {
LOG_WARNING(Render_OpenGL, "Emulating SNORM texture buffer with UNORM.");
}
glTextureBufferRange(texture.handle, texture_format, buffer.handle, offset, size);
views.push_back({
.offset = offset,

Some files were not shown because too many files have changed in this diff.