Compare commits

71 Commits

| SHA1 |
|---|
| da83afdeaf |
| 0c7149d222 |
| 05f26e1337 |
| 4c678cfbc8 |
| 8870fae674 |
| 8348c41eab |
| 638044820d |
| 1f952f6ac9 |
| 96b8a3ecac |
| c352381ce9 |
| 9775a73d1a |
| 088c434d65 |
| 9863db9db4 |
| 6bfb4c8f71 |
| ac6cbb7134 |
| 641783df8f |
| c0b9e93b77 |
| 9368e17a92 |
| 91fd4e30f2 |
| 57f1d8ef8d |
| d1b53c8d82 |
| 7322c99e5f |
| 467adc1acd |
| 0483dfae1a |
| 8d1f5bfbd2 |
| fdf90c6d75 |
| 097c25b164 |
| d24ab14126 |
| 54c359d1e3 |
| 600f325d87 |
| 44f10c8dee |
| 4e42ba54e5 |
| d155167ea2 |
| e090a1c6bd |
| e8af3f29d2 |
| 49643d8134 |
| 92c89312fc |
| e0bd27b674 |
| 103380134f |
| 021af4fd00 |
| c8ad039612 |
| 8d5cde6eff |
| ec4e2d1fab |
| b5c0c1e163 |
| 03137086db |
| 4562f7af9a |
| f3f57f90fe |
| b5d61f214d |
| 3cf88a4d6c |
| 67560296c6 |
| d72d753b1a |
| a3ffea6a64 |
| b014fdacdb |
| 1073346c7f |
| 3053a62375 |
| d718eab351 |
| c27a626b5b |
| 6f9918552c |
| dcd13a7566 |
| 757aafa582 |
| 1776448df2 |
| 9a9e5844d3 |
| 64dcb40db1 |
| ba4213d956 |
| 484641003c |
| 268942c8fe |
| 6b9cc0ed23 |
| 1d0fe75e7c |
| 644ee0043e |
| a7fb80e612 |
| 2ce5bb9bd6 |
@@ -40,7 +40,7 @@ def parse_imports(file_name):

def parse_imports_recursive(file_name, path_list=[]):
    q = queue.Queue() # create a FIFO queue
-    # file_name can be a string or a list for the convience
+    # file_name can be a string or a list for the convenience
    if isinstance(file_name, str):
        q.put(file_name)
    elif isinstance(file_name, list):

6 .codespellrc Normal file
@@ -0,0 +1,6 @@
+; SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+; SPDX-License-Identifier: GPL-2.0-or-later
+
+[codespell]
+skip = ./.git,./build,./dist,./Doxyfile,./externals,./LICENSES
+ignore-words-list = aci,allright,ba,deques,froms,hda,inout,lod,masia,nam,nax,nd,pullrequests,pullrequest,te,transfered,unstall,uscaled,zink

17 .github/workflows/codespell.yml vendored Normal file
@@ -0,0 +1,17 @@
+# SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# GitHub Action to automate the identification of common misspellings in text files.
+# https://github.com/codespell-project/actions-codespell
+# https://github.com/codespell-project/codespell
+name: codespell
+on: pull_request
+permissions: {}
+jobs:
+  codespell:
+    name: Check for spelling errors
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          persist-credentials: false
+      - uses: codespell-project/actions-codespell@master
@@ -344,12 +344,12 @@ if(ENABLE_QT)
        find_package(PkgConfig REQUIRED)
        pkg_check_modules(QT_DEP_GLU QUIET glu>=9.0.0)
        if (NOT QT_DEP_GLU_FOUND)
-           message(FATAL_ERROR "Qt bundled pacakge dependency `glu` not found. \
+           message(FATAL_ERROR "Qt bundled package dependency `glu` not found. \
                Perhaps `libglu1-mesa-dev` needs to be installed?")
        endif()
        pkg_check_modules(QT_DEP_MESA QUIET dri>=20.0.8)
        if (NOT QT_DEP_MESA_FOUND)
-           message(FATAL_ERROR "Qt bundled pacakge dependency `dri` not found. \
+           message(FATAL_ERROR "Qt bundled package dependency `dri` not found. \
                Perhaps `mesa-common-dev` needs to be installed?")
        endif()
@@ -14,7 +14,7 @@
# FFmpeg_LIBRARIES: aggregate all the paths to the libraries
# FFmpeg_FOUND: True if all components have been found
#
-# This module defines the following targets, which are prefered over variables:
+# This module defines the following targets, which are preferred over variables:
#
# FFmpeg::<component>: Target to use <component> directly, with include path,
# library and dependencies set up. If you are using a static build, you are
@@ -83,7 +83,7 @@ if (MSVC)
    )

    if (USE_CCACHE OR YUZU_USE_PRECOMPILED_HEADERS)
-       # when caching, we need to use /Z7 to downgrade debug info to use an older but more cachable format
+       # when caching, we need to use /Z7 to downgrade debug info to use an older but more cacheable format
        # Precompiled headers are deleted if not using /Z7. See https://github.com/nanoant/CMakePCHCompiler/issues/21
        add_compile_options(/Z7)
    else()
@@ -20,7 +20,7 @@ Manager::Manager(Core::System& system_) : system{system_} {
Result Manager::AcquireSessionId(size_t& session_id) {
    if (num_free_sessions == 0) {
        LOG_ERROR(Service_Audio, "All 4 AudioIn sessions are in use, cannot create any more");
-       return Service::Audio::ERR_MAXIMUM_SESSIONS_REACHED;
+       return Service::Audio::ResultOutOfSessions;
    }
    session_id = session_ids[next_session_id];
    next_session_id = (next_session_id + 1) % MaxInSessions;

@@ -19,7 +19,7 @@ void AudioManager::Shutdown() {

Result AudioManager::SetOutManager(BufferEventFunc buffer_func) {
    if (!running) {
-       return Service::Audio::ERR_OPERATION_FAILED;
+       return Service::Audio::ResultOperationFailed;
    }

    std::scoped_lock l{lock};

@@ -35,7 +35,7 @@ Result AudioManager::SetOutManager(BufferEventFunc buffer_func) {

Result AudioManager::SetInManager(BufferEventFunc buffer_func) {
    if (!running) {
-       return Service::Audio::ERR_OPERATION_FAILED;
+       return Service::Audio::ResultOperationFailed;
    }

    std::scoped_lock l{lock};

@@ -19,7 +19,7 @@ Manager::Manager(Core::System& system_) : system{system_} {
Result Manager::AcquireSessionId(size_t& session_id) {
    if (num_free_sessions == 0) {
        LOG_ERROR(Service_Audio, "All 12 Audio Out sessions are in use, cannot create any more");
-       return Service::Audio::ERR_MAXIMUM_SESSIONS_REACHED;
+       return Service::Audio::ResultOutOfSessions;
    }
    session_id = session_ids[next_session_id];
    next_session_id = (next_session_id + 1) % MaxOutSessions;

@@ -58,7 +58,7 @@ public:
    /**
     * Get a list of audio out device names.
     *
-    * @oaram names - Output container to write names to.
+    * @param names - Output container to write names to.
     * @return Number of names written.
     */
    u32 GetAudioOutDeviceNames(
@@ -28,7 +28,7 @@ SystemManager& Manager::GetSystemManager() {
|
||||
Result Manager::GetWorkBufferSize(const AudioRendererParameterInternal& params,
|
||||
u64& out_count) const {
|
||||
if (!CheckValidRevision(params.revision)) {
|
||||
return Service::Audio::ERR_INVALID_REVISION;
|
||||
return Service::Audio::ResultInvalidRevision;
|
||||
}
|
||||
|
||||
out_count = System::GetWorkBufferSize(params);
|
||||
|
||||
@@ -16,7 +16,7 @@ struct AudioBuffer {
|
||||
s64 played_timestamp;
|
||||
/// Game memory address for these samples.
|
||||
VAddr samples;
|
||||
/// Unqiue identifier for this buffer.
|
||||
/// Unique identifier for this buffer.
|
||||
u64 tag;
|
||||
/// Size of the samples buffer.
|
||||
u64 size;
|
||||
|
||||
@@ -46,7 +46,7 @@ Result In::AppendBuffer(const AudioInBuffer& buffer, u64 tag) {
|
||||
if (system.AppendBuffer(buffer, tag)) {
|
||||
return ResultSuccess;
|
||||
}
|
||||
return Service::Audio::ERR_BUFFER_COUNT_EXCEEDED;
|
||||
return Service::Audio::ResultBufferCountReached;
|
||||
}
|
||||
|
||||
void In::ReleaseAndRegisterBuffers() {
|
||||
|
||||
@@ -45,11 +45,11 @@ Result System::IsConfigValid(const std::string_view device_name,
|
||||
const AudioInParameter& in_params) const {
|
||||
if ((device_name.size() > 0) &&
|
||||
(device_name != GetDefaultDeviceName() && device_name != GetDefaultUacDeviceName())) {
|
||||
return Service::Audio::ERR_INVALID_DEVICE_NAME;
|
||||
return Service::Audio::ResultNotFound;
|
||||
}
|
||||
|
||||
if (in_params.sample_rate != TargetSampleRate && in_params.sample_rate > 0) {
|
||||
return Service::Audio::ERR_INVALID_SAMPLE_RATE;
|
||||
return Service::Audio::ResultInvalidSampleRate;
|
||||
}
|
||||
|
||||
return ResultSuccess;
|
||||
@@ -80,7 +80,7 @@ Result System::Initialize(std::string device_name, const AudioInParameter& in_pa
|
||||
|
||||
Result System::Start() {
|
||||
if (state != State::Stopped) {
|
||||
return Service::Audio::ERR_OPERATION_FAILED;
|
||||
return Service::Audio::ResultOperationFailed;
|
||||
}
|
||||
|
||||
session->Initialize(name, sample_format, channel_count, session_id, handle,
|
||||
|
||||
@@ -46,7 +46,7 @@ Result Out::AppendBuffer(const AudioOutBuffer& buffer, const u64 tag) {
|
||||
if (system.AppendBuffer(buffer, tag)) {
|
||||
return ResultSuccess;
|
||||
}
|
||||
return Service::Audio::ERR_BUFFER_COUNT_EXCEEDED;
|
||||
return Service::Audio::ResultBufferCountReached;
|
||||
}
|
||||
|
||||
void Out::ReleaseAndRegisterBuffers() {
|
||||
|
||||
@@ -33,11 +33,11 @@ std::string_view System::GetDefaultOutputDeviceName() const {
|
||||
Result System::IsConfigValid(std::string_view device_name,
|
||||
const AudioOutParameter& in_params) const {
|
||||
if ((device_name.size() > 0) && (device_name != GetDefaultOutputDeviceName())) {
|
||||
return Service::Audio::ERR_INVALID_DEVICE_NAME;
|
||||
return Service::Audio::ResultNotFound;
|
||||
}
|
||||
|
||||
if (in_params.sample_rate != TargetSampleRate && in_params.sample_rate > 0) {
|
||||
return Service::Audio::ERR_INVALID_SAMPLE_RATE;
|
||||
return Service::Audio::ResultInvalidSampleRate;
|
||||
}
|
||||
|
||||
if (in_params.channel_count == 0 || in_params.channel_count == 2 ||
|
||||
@@ -45,7 +45,7 @@ Result System::IsConfigValid(std::string_view device_name,
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
return Service::Audio::ERR_INVALID_CHANNEL_COUNT;
|
||||
return Service::Audio::ResultInvalidChannelCount;
|
||||
}
|
||||
|
||||
Result System::Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle_,
|
||||
@@ -80,7 +80,7 @@ size_t System::GetSessionId() const {
|
||||
|
||||
Result System::Start() {
|
||||
if (state != State::Stopped) {
|
||||
return Service::Audio::ERR_OPERATION_FAILED;
|
||||
return Service::Audio::ResultOperationFailed;
|
||||
}
|
||||
|
||||
session->Initialize(name, sample_format, channel_count, session_id, handle,
|
||||
|
||||
@@ -135,7 +135,7 @@ void AudioRenderer::ThreadFunc() {
    static constexpr char name[]{"AudioRenderer"};
    MicroProfileOnThreadCreate(name);
    Common::SetCurrentThreadName(name);
-   Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
+   Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
    if (mailbox->ADSPWaitMessage() != RenderMessage::AudioRenderer_InitializeOK) {
        LOG_ERROR(Service_Audio,
                  "ADSP Audio Renderer -- Failed to receive initialize message from host!");

@@ -165,7 +165,7 @@ void AudioRenderer::ThreadFunc() {
        // Check this buffer is valid, as it may not be used.
        if (command_buffer.buffer != 0) {
            // If there are no remaining commands (from the previous list),
-           // this is a new command list, initalize it.
+           // this is a new command list, initialize it.
            if (command_buffer.remaining_command_count == 0) {
                command_list_processor.Initialize(system, command_buffer.buffer,
                                                  command_buffer.size, streams[index]);
@@ -22,7 +22,7 @@ Result Renderer::Initialize(const AudioRendererParameterInternal& params,
|
||||
if (!manager.AddSystem(system)) {
|
||||
LOG_ERROR(Service_Audio,
|
||||
"Both Audio Render sessions are in use, cannot create any more");
|
||||
return Service::Audio::ERR_MAXIMUM_SESSIONS_REACHED;
|
||||
return Service::Audio::ResultOutOfSessions;
|
||||
}
|
||||
system_registered = true;
|
||||
}
|
||||
|
||||
@@ -155,7 +155,7 @@ public:
|
||||
/**
|
||||
* Check if a variadic command buffer is supported.
|
||||
* As of Rev 5 with the added optional performance metric logging, the command
|
||||
* buffer can be a variable size, so take that into account for calcualting its size.
|
||||
* buffer can be a variable size, so take that into account for calculating its size.
|
||||
*
|
||||
* @return True if supported, otherwise false.
|
||||
*/
|
||||
|
||||
@@ -48,7 +48,7 @@ Result InfoUpdater::UpdateVoiceChannelResources(VoiceContext& voice_context) {
|
||||
LOG_ERROR(Service_Audio,
|
||||
"Consumed an incorrect voice resource size, header size={}, consumed={}",
|
||||
in_header->voice_resources_size, consumed_input_size);
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
input += consumed_input_size;
|
||||
@@ -123,7 +123,7 @@ Result InfoUpdater::UpdateVoices(VoiceContext& voice_context,
|
||||
if (consumed_input_size != in_header->voices_size) {
|
||||
LOG_ERROR(Service_Audio, "Consumed an incorrect voices size, header size={}, consumed={}",
|
||||
in_header->voices_size, consumed_input_size);
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
out_header->voices_size = consumed_output_size;
|
||||
@@ -184,7 +184,7 @@ Result InfoUpdater::UpdateEffectsVersion1(EffectContext& effect_context, const b
|
||||
if (consumed_input_size != in_header->effects_size) {
|
||||
LOG_ERROR(Service_Audio, "Consumed an incorrect effects size, header size={}, consumed={}",
|
||||
in_header->effects_size, consumed_input_size);
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
out_header->effects_size = consumed_output_size;
|
||||
@@ -239,7 +239,7 @@ Result InfoUpdater::UpdateEffectsVersion2(EffectContext& effect_context, const b
|
||||
if (consumed_input_size != in_header->effects_size) {
|
||||
LOG_ERROR(Service_Audio, "Consumed an incorrect effects size, header size={}, consumed={}",
|
||||
in_header->effects_size, consumed_input_size);
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
out_header->effects_size = consumed_output_size;
|
||||
@@ -267,7 +267,7 @@ Result InfoUpdater::UpdateMixes(MixContext& mix_context, const u32 mix_buffer_co
|
||||
}
|
||||
|
||||
if (mix_buffer_count == 0) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
std::span<const MixInfo::InParameter> in_params{
|
||||
@@ -281,13 +281,13 @@ Result InfoUpdater::UpdateMixes(MixContext& mix_context, const u32 mix_buffer_co
|
||||
total_buffer_count += params.buffer_count;
|
||||
if (params.dest_mix_id > static_cast<s32>(mix_context.GetCount()) &&
|
||||
params.dest_mix_id != UnusedMixId && params.mix_id != FinalMixId) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (total_buffer_count > mix_buffer_count) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
bool mix_dirty{false};
|
||||
@@ -317,7 +317,7 @@ Result InfoUpdater::UpdateMixes(MixContext& mix_context, const u32 mix_buffer_co
|
||||
if (mix_dirty) {
|
||||
if (behaviour.IsSplitterSupported() && splitter_context.UsingSplitter()) {
|
||||
if (!mix_context.TSortInfo(splitter_context)) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
} else {
|
||||
mix_context.SortInfo();
|
||||
@@ -327,7 +327,7 @@ Result InfoUpdater::UpdateMixes(MixContext& mix_context, const u32 mix_buffer_co
|
||||
if (consumed_input_size != in_header->mix_size) {
|
||||
LOG_ERROR(Service_Audio, "Consumed an incorrect mixes size, header size={}, consumed={}",
|
||||
in_header->mix_size, consumed_input_size);
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
input += mix_count * sizeof(MixInfo::InParameter);
|
||||
@@ -384,7 +384,7 @@ Result InfoUpdater::UpdateSinks(SinkContext& sink_context, std::span<MemoryPoolI
|
||||
if (consumed_input_size != in_header->sinks_size) {
|
||||
LOG_ERROR(Service_Audio, "Consumed an incorrect sinks size, header size={}, consumed={}",
|
||||
in_header->sinks_size, consumed_input_size);
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
input += consumed_input_size;
|
||||
@@ -411,7 +411,7 @@ Result InfoUpdater::UpdateMemoryPools(std::span<MemoryPoolInfo> memory_pools,
|
||||
state != MemoryPoolInfo::ResultState::MapFailed &&
|
||||
state != MemoryPoolInfo::ResultState::InUse) {
|
||||
LOG_WARNING(Service_Audio, "Invalid ResultState from updating memory pools");
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -423,7 +423,7 @@ Result InfoUpdater::UpdateMemoryPools(std::span<MemoryPoolInfo> memory_pools,
|
||||
LOG_ERROR(Service_Audio,
|
||||
"Consumed an incorrect memory pool size, header size={}, consumed={}",
|
||||
in_header->memory_pool_size, consumed_input_size);
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
input += consumed_input_size;
|
||||
@@ -453,7 +453,7 @@ Result InfoUpdater::UpdatePerformanceBuffer(std::span<u8> performance_output,
|
||||
LOG_ERROR(Service_Audio,
|
||||
"Consumed an incorrect performance size, header size={}, consumed={}",
|
||||
in_header->performance_buffer_size, consumed_input_size);
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
input += consumed_input_size;
|
||||
@@ -467,18 +467,18 @@ Result InfoUpdater::UpdateBehaviorInfo(BehaviorInfo& behaviour_) {
|
||||
const auto in_params{reinterpret_cast<const BehaviorInfo::InParameter*>(input)};
|
||||
|
||||
if (!CheckValidRevision(in_params->revision)) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
if (in_params->revision != behaviour_.GetUserRevision()) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
behaviour_.ClearError();
|
||||
behaviour_.UpdateFlags(in_params->flags);
|
||||
|
||||
if (in_header->behaviour_size != sizeof(BehaviorInfo::InParameter)) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
input += sizeof(BehaviorInfo::InParameter);
|
||||
@@ -500,7 +500,7 @@ Result InfoUpdater::UpdateErrorInfo(const BehaviorInfo& behaviour_) {
|
||||
Result InfoUpdater::UpdateSplitterInfo(SplitterContext& splitter_context) {
|
||||
u32 consumed_size{0};
|
||||
if (!splitter_context.Update(input, consumed_size)) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
|
||||
input += consumed_size;
|
||||
@@ -529,9 +529,9 @@ Result InfoUpdater::UpdateRendererInfo(const u64 elapsed_frames) {
|
||||
|
||||
Result InfoUpdater::CheckConsumedSize() {
|
||||
if (CpuAddr(input) - CpuAddr(input_origin.data()) != expected_input_size) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
} else if (CpuAddr(output) - CpuAddr(output_origin.data()) != expected_output_size) {
|
||||
return Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
return Service::Audio::ResultInvalidUpdateInfo;
|
||||
}
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
@@ -192,7 +192,7 @@ public:
|
||||
/**
|
||||
* Get this effect's parameter data.
|
||||
*
|
||||
* @return Pointer to the parametter, must be cast to the correct type.
|
||||
* @return Pointer to the parameter, must be cast to the correct type.
|
||||
*/
|
||||
u8* GetParameter() {
|
||||
return parameter.data();
|
||||
@@ -201,7 +201,7 @@ public:
|
||||
/**
|
||||
* Get this effect's parameter data.
|
||||
*
|
||||
* @return Pointer to the parametter, must be cast to the correct type.
|
||||
* @return Pointer to the parameter, must be cast to the correct type.
|
||||
*/
|
||||
u8* GetStateBuffer() {
|
||||
return state.data();
|
||||
|
||||
@@ -29,7 +29,7 @@ public:
     */
    enum class State {
        Invalid,
-       Aquired,
+       Acquired,
        RequestDetach,
        Detached,
        RequestAttach,
@@ -92,7 +92,7 @@ bool PoolMapper::TryAttachBuffer(BehaviorInfo::ErrorInfo& error_info, AddressInf
|
||||
address_info.Setup(address, size);
|
||||
|
||||
if (!FillDspAddr(address_info)) {
|
||||
error_info.error_code = Service::Audio::ERR_POOL_MAPPING_FAILED;
|
||||
error_info.error_code = Service::Audio::ResultInvalidAddressInfo;
|
||||
error_info.address = address;
|
||||
return force_map;
|
||||
}
|
||||
|
||||
@@ -93,7 +93,7 @@ public:
|
||||
* Splitter sort, traverse the splitter node graph and sort the sorted mixes from results.
|
||||
*
|
||||
* @param splitter_context - Splitter context for the sort.
|
||||
* @return True if the sort was successful, othewise false.
|
||||
* @return True if the sort was successful, otherwise false.
|
||||
*/
|
||||
bool TSortInfo(const SplitterContext& splitter_context);
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ struct PerformanceDetailVersion1 {
|
||||
/* 0x0D */ PerformanceEntryType entry_type;
|
||||
};
|
||||
static_assert(sizeof(PerformanceDetailVersion1) == 0x10,
|
||||
"PerformanceDetailVersion1 has the worng size!");
|
||||
"PerformanceDetailVersion1 has the wrong size!");
|
||||
|
||||
struct PerformanceDetailVersion2 {
|
||||
/* 0x00 */ u32 node_id;
|
||||
@@ -45,6 +45,6 @@ struct PerformanceDetailVersion2 {
|
||||
/* 0x14 */ char unk14[0x4];
|
||||
};
|
||||
static_assert(sizeof(PerformanceDetailVersion2) == 0x18,
|
||||
"PerformanceDetailVersion2 has the worng size!");
|
||||
"PerformanceDetailVersion2 has the wrong size!");
|
||||
|
||||
} // namespace AudioCore::AudioRenderer
|
||||
|
||||
@@ -22,7 +22,7 @@ struct PerformanceEntryVersion1 {
|
||||
/* 0x0C */ PerformanceEntryType entry_type;
|
||||
};
|
||||
static_assert(sizeof(PerformanceEntryVersion1) == 0x10,
|
||||
"PerformanceEntryVersion1 has the worng size!");
|
||||
"PerformanceEntryVersion1 has the wrong size!");
|
||||
|
||||
struct PerformanceEntryVersion2 {
|
||||
/* 0x00 */ u32 node_id;
|
||||
@@ -32,6 +32,6 @@ struct PerformanceEntryVersion2 {
|
||||
/* 0x0D */ char unk0D[0xB];
|
||||
};
|
||||
static_assert(sizeof(PerformanceEntryVersion2) == 0x18,
|
||||
"PerformanceEntryVersion2 has the worng size!");
|
||||
"PerformanceEntryVersion2 has the wrong size!");
|
||||
|
||||
} // namespace AudioCore::AudioRenderer
|
||||
|
||||
@@ -16,7 +16,7 @@ struct PerformanceFrameHeaderVersion1 {
|
||||
/* 0x14 */ u32 frame_index;
|
||||
};
|
||||
static_assert(sizeof(PerformanceFrameHeaderVersion1) == 0x18,
|
||||
"PerformanceFrameHeaderVersion1 has the worng size!");
|
||||
"PerformanceFrameHeaderVersion1 has the wrong size!");
|
||||
|
||||
struct PerformanceFrameHeaderVersion2 {
|
||||
/* 0x00 */ u32 magic; // "PERF"
|
||||
@@ -31,6 +31,6 @@ struct PerformanceFrameHeaderVersion2 {
|
||||
/* 0x25 */ char unk25[0xB];
|
||||
};
|
||||
static_assert(sizeof(PerformanceFrameHeaderVersion2) == 0x30,
|
||||
"PerformanceFrameHeaderVersion2 has the worng size!");
|
||||
"PerformanceFrameHeaderVersion2 has the wrong size!");
|
||||
|
||||
} // namespace AudioCore::AudioRenderer
|
||||
|
||||
@@ -55,7 +55,7 @@ public:
|
||||
/**
|
||||
* Get the total number of splitter destinations.
|
||||
*
|
||||
* @return Number of destiantions.
|
||||
* @return Number of destinations.
|
||||
*/
|
||||
u32 GetDataCount() const;
|
||||
|
||||
|
||||
@@ -87,7 +87,7 @@ public:
|
||||
/**
|
||||
* Update this destination.
|
||||
*
|
||||
* @param params - Inpout parameters to update the destination.
|
||||
* @param params - Input parameters to update the destination.
|
||||
*/
|
||||
void Update(const InParameter& params);
|
||||
|
||||
@@ -126,9 +126,9 @@ private:
|
||||
std::array<f32, MaxMixBuffers> prev_mix_volumes{0.0f};
|
||||
/// Next destination in the mix chain
|
||||
SplitterDestinationData* next{};
|
||||
/// Is this destiantion in use?
|
||||
/// Is this destination in use?
|
||||
bool in_use{};
|
||||
/// Does this destiantion need its volumes updated?
|
||||
/// Does this destination need its volumes updated?
|
||||
bool need_update{};
|
||||
};
|
||||
|
||||
|
||||
@@ -49,14 +49,14 @@ public:
|
||||
/**
|
||||
* Get the number of destinations in this splitter.
|
||||
*
|
||||
* @return The number of destiantions.
|
||||
* @return The number of destinations.
|
||||
*/
|
||||
u32 GetDestinationCount() const;
|
||||
|
||||
/**
|
||||
* Set the number of destinations in this splitter.
|
||||
*
|
||||
* @param count - The new number of destiantions.
|
||||
* @param count - The new number of destinations.
|
||||
*/
|
||||
void SetDestinationCount(u32 count);
|
||||
|
||||
|
||||
@@ -101,15 +101,15 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
|
||||
u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) {
|
||||
if (!CheckValidRevision(params.revision)) {
|
||||
return Service::Audio::ERR_INVALID_REVISION;
|
||||
return Service::Audio::ResultInvalidRevision;
|
||||
}
|
||||
|
||||
if (GetWorkBufferSize(params) > transfer_memory_size) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
if (process_handle_ == 0) {
|
||||
return Service::Audio::ERR_INVALID_PROCESS_HANDLE;
|
||||
return Service::Audio::ResultInvalidHandle;
|
||||
}
|
||||
|
||||
behavior.SetUserLibRevision(params.revision);
|
||||
@@ -143,19 +143,19 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
samples_workbuffer =
|
||||
allocator.Allocate<s32>((voice_channels + mix_buffer_count) * sample_count, 0x10);
|
||||
if (samples_workbuffer.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
auto upsampler_workbuffer{allocator.Allocate<s32>(
|
||||
(voice_channels + mix_buffer_count) * TargetSampleCount * upsampler_count, 0x10)};
|
||||
if (upsampler_workbuffer.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
depop_buffer =
|
||||
allocator.Allocate<s32>(Common::AlignUp(static_cast<u32>(mix_buffer_count), 0x40), 0x40);
|
||||
if (depop_buffer.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
// invalidate samples_workbuffer DSP cache
|
||||
@@ -166,12 +166,12 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
}
|
||||
|
||||
if (voice_infos.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
auto sorted_voice_infos{allocator.Allocate<VoiceInfo*>(params.voices, 0x10)};
|
||||
if (sorted_voice_infos.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
std::memset(sorted_voice_infos.data(), 0, sorted_voice_infos.size_bytes());
|
||||
@@ -183,12 +183,12 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
}
|
||||
|
||||
if (voice_channel_resources.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
auto voice_cpu_states{allocator.Allocate<VoiceState>(params.voices, 0x10)};
|
||||
if (voice_cpu_states.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
for (auto& voice_state : voice_cpu_states) {
|
||||
@@ -198,7 +198,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
auto mix_infos{allocator.Allocate<MixInfo>(params.sub_mixes + 1, 0x10)};
|
||||
|
||||
if (mix_infos.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
u32 effect_process_order_count{0};
|
||||
@@ -208,7 +208,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
effect_process_order_count = params.effects * (params.sub_mixes + 1);
|
||||
effect_process_order_buffer = allocator.Allocate<s32>(effect_process_order_count, 0x10);
|
||||
if (effect_process_order_buffer.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -222,7 +222,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
|
||||
auto sorted_mix_infos{allocator.Allocate<MixInfo*>(params.sub_mixes + 1, 0x10)};
|
||||
if (sorted_mix_infos.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
std::memset(sorted_mix_infos.data(), 0, sorted_mix_infos.size_bytes());
|
||||
@@ -235,7 +235,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
auto edge_matrix_workbuffer{allocator.Allocate<u8>(edge_matrix_size, 1)};
|
||||
|
||||
if (node_states_workbuffer.empty() || edge_matrix_workbuffer.size() == 0) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
mix_context.Initialize(sorted_mix_infos, mix_infos, params.sub_mixes + 1,
|
||||
@@ -250,7 +250,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
|
||||
upsampler_manager = allocator.Allocate<UpsamplerManager>(1, 0x10).data();
|
||||
if (upsampler_manager == nullptr) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
memory_pool_workbuffer = allocator.Allocate<MemoryPoolInfo>(memory_pool_count, 0x10);
|
||||
@@ -259,18 +259,18 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
}
|
||||
|
||||
if (memory_pool_workbuffer.empty() && memory_pool_count > 0) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
if (!splitter_context.Initialize(behavior, params, allocator)) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
std::span<EffectResultState> effect_result_states_cpu{};
|
||||
if (behavior.IsEffectInfoVersion2Supported() && params.effects > 0) {
|
||||
effect_result_states_cpu = allocator.Allocate<EffectResultState>(params.effects, 0x10);
|
||||
if (effect_result_states_cpu.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
std::memset(effect_result_states_cpu.data(), 0, effect_result_states_cpu.size_bytes());
|
||||
}
|
||||
@@ -289,7 +289,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
upsampler_workbuffer);
|
||||
|
||||
if (upsampler_infos.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
auto effect_infos{allocator.Allocate<EffectInfoBase>(params.effects, 0x40)};
|
||||
@@ -298,14 +298,14 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
}
|
||||
|
||||
if (effect_infos.empty() && params.effects > 0) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
std::span<EffectResultState> effect_result_states_dsp{};
|
||||
if (behavior.IsEffectInfoVersion2Supported() && params.effects > 0) {
|
||||
effect_result_states_dsp = allocator.Allocate<EffectResultState>(params.effects, 0x40);
|
||||
if (effect_result_states_dsp.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
std::memset(effect_result_states_dsp.data(), 0, effect_result_states_dsp.size_bytes());
|
||||
}
|
||||
@@ -319,14 +319,14 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
}
|
||||
|
||||
if (sinks.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
sink_context.Initialize(sinks, params.sinks);
|
||||
|
||||
auto voice_dsp_states{allocator.Allocate<VoiceState>(params.voices, 0x40)};
|
||||
if (voice_dsp_states.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
for (auto& voice_state : voice_dsp_states) {
|
||||
@@ -344,7 +344,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
0xC};
|
||||
performance_workbuffer = allocator.Allocate<u8>(perf_workbuffer_size, 0x40);
|
||||
if (performance_workbuffer.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
std::memset(performance_workbuffer.data(), 0, performance_workbuffer.size_bytes());
|
||||
performance_manager.Initialize(performance_workbuffer, performance_workbuffer.size_bytes(),
|
||||
@@ -360,7 +360,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
|
||||
command_workbuffer_size = allocator.GetRemainingSize();
|
||||
command_workbuffer = allocator.Allocate<u8>(command_workbuffer_size, 0x40);
|
||||
if (command_workbuffer.empty()) {
|
||||
return Service::Audio::ERR_INSUFFICIENT_BUFFER_SIZE;
|
||||
return Service::Audio::ResultInsufficientBuffer;
|
||||
}
|
||||
|
||||
command_buffer_size = 0;
|
||||
|
||||
@@ -154,7 +154,7 @@ public:
|
||||
ExecutionMode GetExecutionMode() const;
|
||||
|
||||
/**
|
||||
* Get the rendering deivce for this system.
|
||||
* Get the rendering device for this system.
|
||||
* This is unused.
|
||||
*
|
||||
* @return Rendering device for this system.
|
||||
@@ -241,7 +241,7 @@ private:
|
||||
std::span<u8> command_workbuffer{};
|
||||
/// Size of command workbuffer
|
||||
u64 command_workbuffer_size{};
|
||||
/// Numebr of commands in the workbuffer
|
||||
/// Number of commands in the workbuffer
|
||||
u64 command_buffer_size{};
|
||||
/// Manager for upsamplers
|
||||
UpsamplerManager* upsampler_manager{};
|
||||
|
||||
@@ -36,7 +36,7 @@ public:
|
||||
/**
|
||||
* Initialize the system manager, called when any system is registered.
|
||||
*
|
||||
* @return True if sucessfully initialized, otherwise false.
|
||||
* @return True if successfully initialized, otherwise false.
|
||||
*/
|
||||
bool InitializeUnsafe();
|
||||
|
||||
@@ -50,7 +50,7 @@ public:
|
||||
* The manager does not own the system, so do not free it without calling Remove.
|
||||
*
|
||||
* @param system - The system to add.
|
||||
* @return True if succesfully added, otherwise false.
|
||||
* @return True if successfully added, otherwise false.
|
||||
*/
|
||||
bool Add(System& system);
|
||||
|
||||
@@ -58,7 +58,7 @@ public:
|
||||
* Remove an audio render system from the manager.
|
||||
*
|
||||
* @param system - The system to remove.
|
||||
* @return True if succesfully removed, otherwise false.
|
||||
* @return True if successfully removed, otherwise false.
|
||||
*/
|
||||
bool Remove(System& system);
|
||||
|
||||
|
||||
@@ -181,7 +181,7 @@ void VoiceInfo::UpdateWaveBuffer(std::span<BehaviorInfo::ErrorInfo> error_info,
|
||||
if (wave_buffer_internal.start_offset * byte_size > wave_buffer_internal.size ||
|
||||
wave_buffer_internal.end_offset * byte_size > wave_buffer_internal.size) {
|
||||
LOG_ERROR(Service_Audio, "Invalid PCM16 start/end wavebuffer sizes!");
|
||||
error_info[0].error_code = Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
error_info[0].error_code = Service::Audio::ResultInvalidUpdateInfo;
|
||||
error_info[0].address = wave_buffer_internal.address;
|
||||
return;
|
||||
}
|
||||
@@ -192,7 +192,7 @@ void VoiceInfo::UpdateWaveBuffer(std::span<BehaviorInfo::ErrorInfo> error_info,
|
||||
if (wave_buffer_internal.start_offset * byte_size > wave_buffer_internal.size ||
|
||||
wave_buffer_internal.end_offset * byte_size > wave_buffer_internal.size) {
|
||||
LOG_ERROR(Service_Audio, "Invalid PCMFloat start/end wavebuffer sizes!");
|
||||
error_info[0].error_code = Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
error_info[0].error_code = Service::Audio::ResultInvalidUpdateInfo;
|
||||
error_info[0].address = wave_buffer_internal.address;
|
||||
return;
|
||||
}
|
||||
@@ -216,7 +216,7 @@ void VoiceInfo::UpdateWaveBuffer(std::span<BehaviorInfo::ErrorInfo> error_info,
|
||||
if (start > static_cast<s64>(wave_buffer_internal.size) ||
|
||||
end > static_cast<s64>(wave_buffer_internal.size)) {
|
||||
LOG_ERROR(Service_Audio, "Invalid ADPCM start/end wavebuffer sizes!");
|
||||
error_info[0].error_code = Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
error_info[0].error_code = Service::Audio::ResultInvalidUpdateInfo;
|
||||
error_info[0].address = wave_buffer_internal.address;
|
||||
return;
|
||||
}
|
||||
@@ -228,7 +228,7 @@ void VoiceInfo::UpdateWaveBuffer(std::span<BehaviorInfo::ErrorInfo> error_info,
|
||||
|
||||
if (wave_buffer_internal.start_offset < 0 || wave_buffer_internal.end_offset < 0) {
|
||||
LOG_ERROR(Service_Audio, "Invalid input start/end wavebuffer sizes!");
|
||||
error_info[0].error_code = Service::Audio::ERR_INVALID_UPDATE_DATA;
|
||||
error_info[0].error_code = Service::Audio::ResultInvalidUpdateInfo;
|
||||
error_info[0].address = wave_buffer_internal.address;
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ public:
|
||||
void Initialize();
|
||||
|
||||
/**
|
||||
* Does this voice ned an update?
|
||||
* Does this voice need an update?
|
||||
*
|
||||
* @param params - Input parameters to check matching.
|
||||
*
|
||||
@@ -236,7 +236,7 @@ public:
|
||||
*
|
||||
* @param error_info - Output array of errors.
|
||||
* @param wave_buffer - The wavebuffer to be updated.
|
||||
* @param wave_buffer_internal - Input parametters to be used for the update.
|
||||
* @param wave_buffer_internal - Input parameters to be used for the update.
|
||||
* @param sample_format - Sample format of the wavebuffer.
|
||||
* @param valid - Is this wavebuffer valid?
|
||||
* @param pool_mapper - Used to map the wavebuffers.
|
||||
|
||||
@@ -66,7 +66,7 @@ public:
|
||||
* @param description The room description
|
||||
* @param port The port of the room
|
||||
* @param net_version The version of the libNetwork that gets used
|
||||
* @param has_password True if the room is passowrd protected
|
||||
* @param has_password True if the room is password protected
|
||||
* @param preferred_game The preferred game of the room
|
||||
* @param preferred_game_id The title id of the preferred game
|
||||
*/
|
||||
|
||||
@@ -3,19 +3,21 @@

#pragma once

-#include <cstring>
-#include <type_traits>
+#include <version>
+
+#ifdef __cpp_lib_bit_cast
+#include <bit>
+#endif

namespace Common {

template <typename To, typename From>
-[[nodiscard]] std::enable_if_t<sizeof(To) == sizeof(From) && std::is_trivially_copyable_v<From> &&
-                                   std::is_trivially_copyable_v<To>,
-                               To>
-BitCast(const From& src) noexcept {
-    To dst;
-    std::memcpy(&dst, &src, sizeof(To));
-    return dst;
+constexpr inline To BitCast(const From& from) {
+#ifdef __cpp_lib_bit_cast
+    return std::bit_cast<To>(from);
+#else
+    return __builtin_bit_cast(To, from);
+#endif
}

} // namespace Common
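The hunk above swaps a memcpy-based BitCast for std::bit_cast, falling back to the compiler builtin where the library feature is missing. A minimal, self-contained sketch of the same pattern — illustrative only, not yuzu's header, and assuming a GCC/Clang/MSVC toolchain that provides __builtin_bit_cast when <bit> does not:

```cpp
// Sketch of a bit_cast helper that prefers std::bit_cast and otherwise uses the
// compiler builtin. Names here are examples, not the project's own.
#include <cstdint>
#include <cstdio>
#include <version>

#ifdef __cpp_lib_bit_cast
#include <bit>
#endif

template <typename To, typename From>
constexpr To BitCastExample(const From& from) noexcept {
    static_assert(sizeof(To) == sizeof(From), "sizes must match");
#ifdef __cpp_lib_bit_cast
    return std::bit_cast<To>(from);
#else
    return __builtin_bit_cast(To, from);
#endif
}

int main() {
    // Reinterpret a float's bytes as a 32-bit integer without undefined behavior.
    const std::uint32_t bits = BitCastExample<std::uint32_t>(3.14159f);
    std::printf("0x%08x\n", bits);
}
```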
@@ -90,7 +90,7 @@ Fiber::~Fiber() {
}

void Fiber::Exit() {
-   ASSERT_MSG(impl->is_thread_fiber, "Exitting non main thread fiber");
+   ASSERT_MSG(impl->is_thread_fiber, "Exiting non main thread fiber");
    if (!impl->is_thread_fiber) {
        return;
    }
@@ -22,7 +22,7 @@ class FixedPoint;
namespace detail {

// helper templates to make magic with types :)
-// these allow us to determine resonable types from
+// these allow us to determine reasonable types from
// a desired size, they also let us infer the next largest type
// from a type which is nice for the division op
template <size_t T>
@@ -322,7 +322,7 @@ private:
    }

    /// Return true when a given memory region is a "nieche" and the placeholders don't have to be
-   /// splitted.
+   /// split.
    bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
        const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
        if (it != placeholders.end() && it->lower() == virtual_offset + length) {

@@ -484,7 +484,7 @@ class HostMemory::Impl {
public:
    explicit Impl(size_t /*backing_size */, size_t /* virtual_size */) {
        // This is just a place holder.
-       // Please implement fastmem in a propper way on your platform.
+       // Please implement fastmem in a proper way on your platform.
        throw std::bad_alloc{};
    }
@@ -15,7 +15,7 @@

namespace Common::Input {

-// Type of data that is expected to recieve or send
+// Type of data that is expected to receive or send
enum class InputType {
    None,
    Battery,

@@ -46,7 +46,7 @@ enum class PollingMode {
    // Constant polling of buttons, analogs and motion data
    Active,
    // Only update on button change, digital analogs
-   Pasive,
+   Passive,
    // Enable near field communication polling
    NFC,
    // Enable infrared camera polling

@@ -103,7 +103,7 @@ enum class VibrationAmplificationType {
struct AnalogProperties {
    // Anything below this value will be detected as zero
    float deadzone{};
-   // Anyting above this values will be detected as one
+   // Anything above this values will be detected as one
    float range{1.0f};
    // Minimum value to be detected as active
    float threshold{0.5f};
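For context on how fields such as deadzone, range, and threshold are typically consumed, here is a hedged, illustrative mapping from a raw axis value to a calibrated one; it is a common interpretation of such properties, not the engine's actual mapping code, and all names are invented for the example:

```cpp
// Illustrative only: apply deadzone/range scaling to a raw [-1, 1] axis value
// and derive a digital "pressed" state from the threshold.
#include <algorithm>
#include <cmath>
#include <cstdio>

struct AnalogPropertiesExample {
    float deadzone{0.1f};   // below this magnitude the stick reads as zero
    float range{0.95f};     // at or above this magnitude the stick reads as one
    float threshold{0.5f};  // magnitude at which the axis counts as "pressed"
};

float ApplyAnalogProperties(float raw, const AnalogPropertiesExample& p) {
    const float mag = std::abs(raw);
    if (mag < p.deadzone) {
        return 0.0f;
    }
    // Rescale the usable band [deadzone, range] onto [0, 1], clamped.
    const float scaled = (mag - p.deadzone) / (p.range - p.deadzone);
    return std::copysign(std::clamp(scaled, 0.0f, 1.0f), raw);
}

bool IsPressed(float raw, const AnalogPropertiesExample& p) {
    return std::abs(raw) >= p.threshold;
}

int main() {
    const AnalogPropertiesExample props{};
    std::printf("%.2f -> %.2f (pressed: %d)\n", 0.7,
                ApplyAnalogProperties(0.7f, props), IsPressed(0.7f, props));
}
```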
@@ -209,7 +209,7 @@ struct LedStatus {
    bool led_4{};
};

-// Raw data fom camera
+// Raw data from camera
struct CameraStatus {
    CameraFormat format{CameraFormat::None};
    std::vector<u8> data{};

@@ -428,7 +428,7 @@ inline void UnregisterOutputFactory(const std::string& name) {
}

/**
- * Create an input device from given paramters.
+ * Create an input device from given parameters.
 * @tparam InputDeviceType the type of input devices to create
 * @param params a serialized ParamPackage string that contains all parameters for creating the
 * device

@@ -503,7 +503,7 @@ struct Values {
    Setting<bool> tas_loop{false, "tas_loop"};

    Setting<bool> mouse_panning{false, "mouse_panning"};
-   Setting<u8, true> mouse_panning_sensitivity{10, 1, 100, "mouse_panning_sensitivity"};
+   Setting<u8, true> mouse_panning_sensitivity{50, 1, 100, "mouse_panning_sensitivity"};
    Setting<bool> mouse_enabled{false, "mouse_enabled"};

    Setting<bool> emulate_analog_keyboard{false, "emulate_analog_keyboard"};
@@ -23,6 +23,19 @@ static s64 WindowsQueryPerformanceCounter() {
    QueryPerformanceCounter(&counter);
    return counter.QuadPart;
}
+
+static s64 GetSystemTimeNS() {
+    // GetSystemTimePreciseAsFileTime returns the file time in 100ns units.
+    static constexpr s64 Multiplier = 100;
+    // Convert Windows epoch to Unix epoch.
+    static constexpr s64 WindowsEpochToUnixEpochNS = 0x19DB1DED53E8000LL;
+
+    FILETIME filetime;
+    GetSystemTimePreciseAsFileTime(&filetime);
+    return Multiplier * ((static_cast<s64>(filetime.dwHighDateTime) << 32) +
+                         static_cast<s64>(filetime.dwLowDateTime)) -
+           WindowsEpochToUnixEpochNS;
+}
#endif

SteadyClock::time_point SteadyClock::Now() noexcept {

@@ -53,4 +66,16 @@
#endif
}

+RealTimeClock::time_point RealTimeClock::Now() noexcept {
+#if defined(_WIN32)
+    return time_point{duration{GetSystemTimeNS()}};
+#elif defined(__APPLE__)
+    return time_point{duration{clock_gettime_nsec_np(CLOCK_REALTIME)}};
+#else
+    timespec ts;
+    clock_gettime(CLOCK_REALTIME, &ts);
+    return time_point{std::chrono::seconds{ts.tv_sec} + std::chrono::nanoseconds{ts.tv_nsec}};
+#endif
+}
+
}; // namespace Common
@@ -20,4 +20,15 @@ struct SteadyClock {
    [[nodiscard]] static time_point Now() noexcept;
};

+struct RealTimeClock {
+    using rep = s64;
+    using period = std::nano;
+    using duration = std::chrono::nanoseconds;
+    using time_point = std::chrono::time_point<RealTimeClock>;
+
+    static constexpr bool is_steady = false;
+
+    [[nodiscard]] static time_point Now() noexcept;
+};
+
} // namespace Common
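As a usage illustration (assumptions: a POSIX host, and all names invented for the example rather than taken from yuzu), any clock shaped like the RealTimeClock above — a nanosecond duration plus a static Now() — can be differenced directly with std::chrono arithmetic:

```cpp
// Illustrative wall-clock measurement with a RealTimeClock-style type.
#include <chrono>
#include <cstdio>
#include <ctime>

struct ExampleRealTimeClock {
    using duration = std::chrono::nanoseconds;
    using time_point = std::chrono::time_point<ExampleRealTimeClock>;

    static time_point Now() noexcept {
        // CLOCK_REALTIME: wall-clock time, which may jump if the system clock is adjusted.
        timespec ts{};
        clock_gettime(CLOCK_REALTIME, &ts);
        return time_point{std::chrono::seconds{ts.tv_sec} +
                          std::chrono::nanoseconds{ts.tv_nsec}};
    }
};

int main() {
    const auto start = ExampleRealTimeClock::Now();
    // ... some work ...
    const auto end = ExampleRealTimeClock::Now();
    std::printf("elapsed: %lld ns\n",
                static_cast<long long>((end - start).count()));
}
```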
@@ -229,7 +229,7 @@ public:
        value = swap(swap() - 1);
        return old;
    }
-   // Comparaison
+   // Comparison
    // v == i
    bool operator==(const swapped_t& i) const {
        return swap() == i.swap();

@@ -368,7 +368,7 @@ public:
    // Member
    /** todo **/

-   // Arithmetics
+   // Arithmetic
    template <typename S, typename T2, typename F2>
    friend S operator+(const S& p, const swapped_t v);

@@ -384,7 +384,7 @@ public:
    template <typename S, typename T2, typename F2>
    friend S operator%(const S& p, const swapped_t v);

-   // Arithmetics + assignments
+   // Arithmetic + assignments
    template <typename S, typename T2, typename F2>
    friend S operator+=(const S& p, const swapped_t v);

@@ -415,7 +415,7 @@ public:
    friend bool operator==(const S& p, const swapped_t v);
};

-// Arithmetics
+// Arithmetic
template <typename S, typename T, typename F>
S operator+(const S& i, const swap_struct_t<T, F> v) {
    return i + v.swap();

@@ -441,7 +441,7 @@ S operator%(const S& i, const swap_struct_t<T, F> v) {
    return i % v.swap();
}

-// Arithmetics + assignments
+// Arithmetic + assignments
template <typename S, typename T, typename F>
S& operator+=(S& i, const swap_struct_t<T, F> v) {
    i += v.swap();

@@ -465,7 +465,7 @@ S operator&(const swap_struct_t<T, F> v, const S& i) {
    return static_cast<S>(v.swap() & i);
}

-// Comparaison
+// Comparison
template <typename S, typename T, typename F>
bool operator<(const S& p, const swap_struct_t<T, F> v) {
    return p < v.swap();
@@ -53,11 +53,11 @@ u64 EstimateRDTSCFrequency() {
    FencedRDTSC();

    // Get the current time.
-   const auto start_time = Common::SteadyClock::Now();
+   const auto start_time = Common::RealTimeClock::Now();
    const u64 tsc_start = FencedRDTSC();
    // Wait for 250 milliseconds.
    std::this_thread::sleep_for(std::chrono::milliseconds{250});
-   const auto end_time = Common::SteadyClock::Now();
+   const auto end_time = Common::RealTimeClock::Now();
    const u64 tsc_end = FencedRDTSC();
    // Calculate differences.
    const u64 timer_diff = static_cast<u64>(

@@ -72,13 +72,29 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
                         u64 rtsc_frequency_)
    : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{
                                                                               rtsc_frequency_} {
+   // Thread to re-adjust the RDTSC frequency after 10 seconds has elapsed.
+   time_sync_thread = std::jthread{[this](std::stop_token token) {
+       // Get the current time.
+       const auto start_time = Common::RealTimeClock::Now();
+       const u64 tsc_start = FencedRDTSC();
+       // Wait for 10 seconds.
+       if (!Common::StoppableTimedWait(token, std::chrono::seconds{10})) {
+           return;
+       }
+       const auto end_time = Common::RealTimeClock::Now();
+       const u64 tsc_end = FencedRDTSC();
+       // Calculate differences.
+       const u64 timer_diff = static_cast<u64>(
+           std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time).count());
+       const u64 tsc_diff = tsc_end - tsc_start;
+       const u64 tsc_freq = MultiplyAndDivide64(tsc_diff, 1000000000ULL, timer_diff);
+       rtsc_frequency = tsc_freq;
+       CalculateAndSetFactors();
+   }};
+
    time_point.inner.last_measure = FencedRDTSC();
    time_point.inner.accumulated_ticks = 0U;
-   ns_rtsc_factor = GetFixedPoint64Factor(NS_RATIO, rtsc_frequency);
-   us_rtsc_factor = GetFixedPoint64Factor(US_RATIO, rtsc_frequency);
-   ms_rtsc_factor = GetFixedPoint64Factor(MS_RATIO, rtsc_frequency);
-   clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
-   cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
+   CalculateAndSetFactors();
}

u64 NativeClock::GetRTSC() {

@@ -138,6 +154,14 @@ u64 NativeClock::GetCPUCycles() {
    return MultiplyHigh(rtsc_value, cpu_rtsc_factor);
}

+void NativeClock::CalculateAndSetFactors() {
+    ns_rtsc_factor = GetFixedPoint64Factor(NS_RATIO, rtsc_frequency);
+    us_rtsc_factor = GetFixedPoint64Factor(US_RATIO, rtsc_frequency);
+    ms_rtsc_factor = GetFixedPoint64Factor(MS_RATIO, rtsc_frequency);
+    clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
+    cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
+}
+
} // namespace X64

} // namespace Common

@@ -3,6 +3,7 @@

#pragma once

+#include "common/polyfill_thread.h"
#include "common/wall_clock.h"

namespace Common {

@@ -28,6 +29,8 @@ public:
private:
    u64 GetRTSC();

+   void CalculateAndSetFactors();
+
    union alignas(16) TimePoint {
        TimePoint() : pack{} {}
        u128 pack{};

@@ -47,6 +50,8 @@ private:
    u64 ms_rtsc_factor{};

    u64 rtsc_frequency;

+   std::jthread time_sync_thread;
};
} // namespace X64
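The re-calibration thread above repeats the same measure-the-TSC-against-a-wall-clock trick that EstimateRDTSCFrequency uses. A hedged, standalone sketch of that idea follows; the names (ReadTsc, EstimateTscHz) are illustrative, it assumes a GCC/Clang x86-64 host, and yuzu's version additionally uses MultiplyAndDivide64 so the arithmetic cannot overflow over long intervals:

```cpp
// Estimate the TSC frequency by counting ticks across a known wall-clock interval.
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <x86intrin.h>

static std::uint64_t ReadTsc() {
    _mm_mfence();  // keep the timestamp read from drifting across surrounding memory ops
    return __rdtsc();
}

static std::uint64_t EstimateTscHz(std::chrono::milliseconds interval) {
    const auto wall_start = std::chrono::steady_clock::now();
    const std::uint64_t tsc_start = ReadTsc();
    std::this_thread::sleep_for(interval);
    const auto wall_end = std::chrono::steady_clock::now();
    const std::uint64_t tsc_end = ReadTsc();

    const auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(wall_end - wall_start);
    // ticks per second = ticks * 1e9 / elapsed_ns; fine for short intervals,
    // a 128-bit multiply-divide is needed for long ones.
    return (tsc_end - tsc_start) * 1'000'000'000ULL /
           static_cast<std::uint64_t>(ns.count());
}

int main() {
    std::printf("~%llu Hz\n", static_cast<unsigned long long>(
                                  EstimateTscHz(std::chrono::milliseconds{250})));
}
```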
@@ -158,6 +158,7 @@ add_library(core STATIC
    hid/motion_input.h
    hle/api_version.h
    hle/ipc.h
    hle/kernel/board/nintendo/nx/k_memory_layout.cpp
    hle/kernel/board/nintendo/nx/k_memory_layout.h
    hle/kernel/board/nintendo/nx/k_system_control.cpp
    hle/kernel/board/nintendo/nx/k_system_control.h

@@ -211,12 +212,10 @@ add_library(core STATIC
    hle/kernel/k_light_condition_variable.h
    hle/kernel/k_light_lock.cpp
    hle/kernel/k_light_lock.h
    hle/kernel/k_linked_list.h
    hle/kernel/k_memory_block.h
    hle/kernel/k_memory_block_manager.cpp
    hle/kernel/k_memory_block_manager.h
    hle/kernel/k_memory_layout.cpp
    hle/kernel/k_memory_layout.board.nintendo_nx.cpp
    hle/kernel/k_memory_layout.h
    hle/kernel/k_memory_manager.cpp
    hle/kernel/k_memory_manager.h

@@ -454,7 +453,6 @@ add_library(core STATIC
    hle/service/filesystem/fsp_srv.h
    hle/service/fgm/fgm.cpp
    hle/service/fgm/fgm.h
    hle/service/friend/errors.h
    hle/service/friend/friend.cpp
    hle/service/friend/friend.h
    hle/service/friend/friend_interface.cpp
@@ -358,7 +358,7 @@ struct System::Impl {
    void ShutdownMainProcess() {
        SetShuttingDown(true);

-       // Log last frame performance stats if game was loded
+       // Log last frame performance stats if game was loaded
        if (perf_stats) {
            const auto perf_results = GetAndResetPerfStats();
            constexpr auto performance = Common::Telemetry::FieldType::Performance;

@@ -434,7 +434,7 @@ struct System::Impl {
        }

        Service::Glue::ApplicationLaunchProperty launch{};
-       launch.title_id = process.GetProgramID();
+       launch.title_id = process.GetProgramId();

        FileSys::PatchManager pm{launch.title_id, fs_controller, *content_provider};
        launch.version = pm.GetGameVersion().value_or(0);

@@ -762,7 +762,7 @@ const Core::SpeedLimiter& System::SpeedLimiter() const {
}

u64 System::GetApplicationProcessProgramID() const {
-   return impl->kernel.ApplicationProcess()->GetProgramID();
+   return impl->kernel.ApplicationProcess()->GetProgramId();
}

Loader::ResultStatus System::GetGameName(std::string& out) const {

@@ -146,7 +146,7 @@ public:

    /**
     * Initializes the system
-    * This function will initialize core functionaility used for system emulation
+    * This function will initialize core functionality used for system emulation
     */
    void Initialize();
@@ -53,7 +53,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
    static constexpr char name[] = "HostTiming";
    MicroProfileOnThreadCreate(name);
    Common::SetCurrentThreadName(name);
-   Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
+   Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
    instance.on_thread_init();
    instance.ThreadLoop();
    MicroProfileOnThreadExit();
@@ -150,7 +150,7 @@ private:
    // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
    // We don't use std::priority_queue because we need to be able to serialize, unserialize and
    // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
-   // accomodated by the standard adaptor class.
+   // accommodated by the standard adaptor class.
    std::vector<Event> event_queue;
    u64 event_fifo_id = 0;
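For readers unfamiliar with the pattern that comment describes, here is a small, self-contained illustration (not yuzu code) of keeping a plain std::vector heap-ordered with std::make_heap/push_heap/pop_heap, which still allows erasing arbitrary elements — the thing std::priority_queue cannot do:

```cpp
#include <algorithm>
#include <cstdio>
#include <functional>
#include <vector>

int main() {
    std::vector<int> q{5, 1, 9};
    std::make_heap(q.begin(), q.end(), std::greater<>{});  // min-heap

    // Insert while keeping the heap property.
    q.push_back(3);
    std::push_heap(q.begin(), q.end(), std::greater<>{});

    // Erase an arbitrary element, then restore the heap property.
    q.erase(std::find(q.begin(), q.end(), 9));
    std::make_heap(q.begin(), q.end(), std::greater<>{});

    // Pop the smallest element.
    std::pop_heap(q.begin(), q.end(), std::greater<>{});
    std::printf("smallest = %d\n", q.back());  // prints 1
    q.pop_back();
}
```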
@@ -192,7 +192,7 @@ void CpuManager::RunThread(std::stop_token token, std::size_t core) {
    }
    MicroProfileOnThreadCreate(name.c_str());
    Common::SetCurrentThreadName(name.c_str());
-   Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
+   Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
    auto& data = core_data[core];
    data.host_context = Common::Fiber::ThreadToFiber();
@@ -11,7 +11,7 @@

namespace Core::Crypto {

-// Sits on top of a VirtualFile and provides CTR-mode AES decription.
+// Sits on top of a VirtualFile and provides CTR-mode AES description.
class CTREncryptionLayer : public EncryptionLayer {
public:
    using IVData = std::array<u8, 16>;

@@ -249,7 +249,7 @@ public:

    static bool KeyFileExists(bool title);

-   // Call before using the sd seed to attempt to derive it if it dosen't exist. Needs system
+   // Call before using the sd seed to attempt to derive it if it doesn't exist. Needs system
    // save 8*43 and the private file to exist.
    void DeriveSDSeedLazy();

@@ -9,7 +9,7 @@

namespace Core::Crypto {

-// Sits on top of a VirtualFile and provides XTS-mode AES decription.
+// Sits on top of a VirtualFile and provides XTS-mode AES description.
class XTSEncryptionLayer : public EncryptionLayer {
public:
    XTSEncryptionLayer(FileSys::VirtualFile base, Key256 key);
@@ -421,7 +421,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
|
||||
static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
|
||||
const Kernel::KThread* thread) {
|
||||
// Read thread type from TLS
|
||||
const VAddr tls_thread_type{memory.Read32(thread->GetTLSAddress() + 0x1fc)};
|
||||
const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)};
|
||||
const VAddr argument_thread_type{thread->GetArgument()};
|
||||
|
||||
if (argument_thread_type && tls_thread_type != argument_thread_type) {
|
||||
@@ -452,7 +452,7 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
|
||||
static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
|
||||
const Kernel::KThread* thread) {
|
||||
// Read thread type from TLS
|
||||
const VAddr tls_thread_type{memory.Read64(thread->GetTLSAddress() + 0x1f8)};
|
||||
const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)};
|
||||
const VAddr argument_thread_type{thread->GetArgument()};
|
||||
|
||||
if (argument_thread_type && tls_thread_type != argument_thread_type) {
|
||||
@@ -576,7 +576,7 @@ void GDBStub::HandleQuery(std::string_view command) {
|
||||
const auto& threads = system.ApplicationProcess()->GetThreadList();
|
||||
std::vector<std::string> thread_ids;
|
||||
for (const auto& thread : threads) {
|
||||
thread_ids.push_back(fmt::format("{:x}", thread->GetThreadID()));
|
||||
thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId()));
|
||||
}
|
||||
SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
|
||||
} else if (command.starts_with("sThreadInfo")) {
|
||||
@@ -591,11 +591,11 @@ void GDBStub::HandleQuery(std::string_view command) {
|
||||
for (const auto* thread : threads) {
|
||||
auto thread_name{GetThreadName(system, thread)};
|
||||
if (!thread_name) {
|
||||
thread_name = fmt::format("Thread {:d}", thread->GetThreadID());
|
||||
thread_name = fmt::format("Thread {:d}", thread->GetThreadId());
|
||||
}
|
||||
|
||||
buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
|
||||
thread->GetThreadID(), thread->GetActiveCore(),
|
||||
thread->GetThreadId(), thread->GetActiveCore(),
|
||||
EscapeXML(*thread_name), GetThreadState(thread));
|
||||
}
|
||||
|
||||
@@ -756,7 +756,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
|
||||
|
||||
reply = fmt::format("Process: {:#x} ({})\n"
|
||||
"Program Id: {:#018x}\n",
|
||||
process->GetProcessID(), process->GetName(), process->GetProgramID());
|
||||
process->GetProcessId(), process->GetName(), process->GetProgramId());
|
||||
reply +=
|
||||
fmt::format("Layout:\n"
|
||||
" Alias: {:#012x} - {:#012x}\n"
|
||||
@@ -819,7 +819,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
|
||||
Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
|
||||
const auto& threads{system.ApplicationProcess()->GetThreadList()};
|
||||
for (auto* thread : threads) {
|
||||
if (thread->GetThreadID() == thread_id) {
|
||||
if (thread->GetThreadId() == thread_id) {
|
||||
return thread;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -259,7 +259,7 @@ void GDBStubA64::WriteRegisters(Kernel::KThread* thread, std::string_view regist
|
||||
std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
|
||||
return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
|
||||
RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
|
||||
LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID());
|
||||
LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId());
|
||||
}
|
||||
|
||||
u32 GDBStubA64::BreakpointInstruction() const {
|
||||
@@ -469,7 +469,7 @@ void GDBStubA32::WriteRegisters(Kernel::KThread* thread, std::string_view regist
|
||||
std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
|
||||
return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
|
||||
RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
|
||||
LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID());
|
||||
LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId());
|
||||
}
|
||||
|
||||
u32 GDBStubA32::BreakpointInstruction() const {
|
||||
|
||||
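Aside: the ThreadStatus hunks above build GDB remote-protocol "T" stop replies and thread lists with fmt. A rough standalone illustration of the same formatting calls, using made-up register indices and values rather than the stub's real constants:

    #include <cstdint>
    #include <string>
    #include <vector>
    #include <fmt/format.h>
    #include <fmt/ranges.h>

    int main() {
        // Hypothetical stand-ins for the PC/SP/LR register numbers and contents.
        constexpr std::uint32_t PC_REGISTER = 32, SP_REGISTER = 31, LR_REGISTER = 30;
        const std::string pc = "00000000deadbeef", sp = "00000000cafef00d", lr = "0000000012345678";
        const std::uint64_t thread_id = 7;

        // "T<signal><reg>:<value>;...;thread:<id>;" -- the shape produced by ThreadStatus above.
        const std::string stop_reply =
            fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", 5, PC_REGISTER, pc,
                        SP_REGISTER, sp, LR_REGISTER, lr, thread_id);

        // The qfThreadInfo reply is just "m" plus comma-joined hex thread ids.
        const std::vector<std::string> ids{"1", "2", "7"};
        const std::string thread_info = fmt::format("m{}", fmt::join(ids, ","));

        fmt::print("{}\n{}\n", stop_reply, thread_info);
    }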
@@ -93,7 +93,7 @@ inline bool IsDirectoryLogoPartition(const VirtualDir& pfs) {
pfs->GetFile("StartupMovie.gif") != nullptr;
}

// An implementation of VfsDirectory that represents a Nintendo Content Archive (NCA) conatiner.
// An implementation of VfsDirectory that represents a Nintendo Content Archive (NCA) container.
// After construction, use GetStatus to determine if the file is valid and ready to be used.
class NCA : public ReadOnlyVfsDirectory {
public:

@@ -162,7 +162,7 @@ public:
InstallResult InstallEntry(const NSP& nsp, bool overwrite_if_exists = false,
const VfsCopyFunction& copy = &VfsRawCopy);

// Due to the fact that we must use Meta-type NCAs to determine the existance of files, this
// Due to the fact that we must use Meta-type NCAs to determine the existence of files, this
// poses quite a challenge. Instead of creating a new meta NCA for this file, yuzu will create a
// dir inside the NAND called 'yuzu_meta' and store the raw CNMT there.
// TODO(DarkLordZach): Author real meta-type NCAs and install those.

@@ -45,7 +45,7 @@ public:
// Return whether or not the user has write permission on this filesystem.
virtual bool IsWritable() const;

// Determine if the entry at path is non-existant, a file, or a directory.
// Determine if the entry at path is non-existent, a file, or a directory.
virtual VfsEntryType GetEntryType(std::string_view path) const;

// Opens the file with path relative to root. If it doesn't exist, returns nullptr.
@@ -58,7 +58,7 @@ public:
// Moves the file from old_path to new_path, returning the moved file on success and nullptr on
// failure.
virtual VirtualFile MoveFile(std::string_view old_path, std::string_view new_path);
// Deletes the file with path relative to root, returing true on success.
// Deletes the file with path relative to root, returning true on success.
virtual bool DeleteFile(std::string_view path);

// Opens the directory with path relative to root. If it doesn't exist, returns nullptr.
@@ -71,7 +71,7 @@ public:
// Moves the directory from old_path to new_path, returning the moved directory on success and
// nullptr on failure.
virtual VirtualDir MoveDirectory(std::string_view old_path, std::string_view new_path);
// Deletes the directory with path relative to root, returing true on success.
// Deletes the directory with path relative to root, returning true on success.
virtual bool DeleteDirectory(std::string_view path);

protected:
@@ -144,7 +144,7 @@ public:
return Read(reinterpret_cast<u8*>(data), sizeof(T), offset);
}

// Writes exactly one byte to offset in file and retuns whether or not the byte was written
// Writes exactly one byte to offset in file and returns whether or not the byte was written
// successfully.
virtual bool WriteByte(u8 data, std::size_t offset = 0);
// Writes a vector of bytes to offset in file and returns the number of bytes successfully
@@ -191,13 +191,13 @@ public:
VfsDirectory() = default;
virtual ~VfsDirectory();

// Retrives the file located at path as if the current directory was root. Returns nullptr if
// Retrieves the file located at path as if the current directory was root. Returns nullptr if
// not found.
virtual VirtualFile GetFileRelative(std::string_view path) const;
// Calls GetFileRelative(path) on the root of the current directory.
virtual VirtualFile GetFileAbsolute(std::string_view path) const;

// Retrives the directory located at path as if the current directory was root. Returns nullptr
// Retrieves the directory located at path as if the current directory was root. Returns nullptr
// if not found.
virtual VirtualDir GetDirectoryRelative(std::string_view path) const;
// Calls GetDirectoryRelative(path) on the root of the current directory.
@@ -205,7 +205,7 @@ public:

// Returns a vector containing all of the files in this directory.
virtual std::vector<VirtualFile> GetFiles() const = 0;
// Returns the file with filename matching name. Returns nullptr if directory dosen't have a
// Returns the file with filename matching name. Returns nullptr if directory doesn't have a
// file with name.
virtual VirtualFile GetFile(std::string_view name) const;

@@ -214,7 +214,7 @@ public:

// Returns a vector containing all of the subdirectories in this directory.
virtual std::vector<VirtualDir> GetSubdirectories() const = 0;
// Returns the directory with name matching name. Returns nullptr if directory dosen't have a
// Returns the directory with name matching name. Returns nullptr if directory doesn't have a
// directory with name.
virtual VirtualDir GetSubdirectory(std::string_view name) const;
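Aside: a rough sketch of how a GetFileRelative-style lookup can be written on top of the GetSubdirectory/GetFile primitives declared above; this is a simplified, hypothetical helper (assuming an already-normalized path), not yuzu's actual implementation:

    // Walk "dir1/dir2/file" one component at a time, returning nullptr on any miss.
    VirtualFile GetFileRelativeSketch(VirtualDir dir, std::string_view path) {
        while (dir != nullptr) {
            const auto slash = path.find('/');
            if (slash == std::string_view::npos) {
                return dir->GetFile(path);                          // last component: look up the file
            }
            dir = dir->GetSubdirectory(path.substr(0, slash));      // descend one directory level
            path.remove_prefix(slash + 1);
        }
        return nullptr;
    }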
@@ -38,7 +38,7 @@ private:
boost::container::flat_map<std::string, std::weak_ptr<Common::FS::IOFile>> cache;
};

// An implmentation of VfsFile that represents a file on the user's computer.
// An implementation of VfsFile that represents a file on the user's computer.
class RealVfsFile : public VfsFile {
friend class RealVfsDirectory;
friend class RealVfsFilesystem;

@@ -205,7 +205,7 @@ protected:
}

/**
* Converts a screen postion into the equivalent touchscreen position.
* Converts a screen position into the equivalent touchscreen position.
*/
std::pair<f32, f32> MapToTouchScreen(u32 framebuffer_x, u32 framebuffer_y) const;

@@ -132,7 +132,7 @@ struct ControllerStatus {
RingAnalogValue ring_analog_value{};
NfcValues nfc_values{};

// Data for HID serices
// Data for HID services
HomeButtonState home_button_state{};
CaptureButtonState capture_button_state{};
NpadButtonState npad_button_state{};
@@ -357,7 +357,7 @@ public:

/**
* Sends a small vibration to the output device
* @return true if SetVibration was successfull
* @return true if SetVibration was successful
*/
bool IsVibrationEnabled(std::size_t device_index);

@@ -373,7 +373,7 @@ public:
/**
* Sets the desired camera format to be polled from a controller
* @param camera_format size of each frame
* @return true if SetCameraFormat was successfull
* @return true if SetCameraFormat was successful
*/
bool SetCameraFormat(Core::IrSensor::ImageTransferProcessorFormat camera_format);

@@ -53,7 +53,7 @@ struct DeviceStatus {
MouseWheelValues mouse_wheel_values{};
MouseStickValue mouse_stick_value{};

// Data for HID serices
// Data for HID services
KeyboardKey keyboard_state{};
KeyboardModifier keyboard_moddifier_state{};
MouseButton mouse_button_state{};
@@ -75,7 +75,7 @@ struct InterfaceUpdateCallback {
class EmulatedDevices {
public:
/**
* Contains all input data related to external devices that aren't necesarily a controller
* Contains all input data related to external devices that aren't necessarily a controller
* This includes devices such as the keyboard or mouse
*/
explicit EmulatedDevices();

@@ -328,7 +328,7 @@ void SanitizeAnalog(Common::Input::AnalogStatus& analog, bool clamp_value) {
// Apply center offset
raw_value -= properties.offset;

// Set initial values to be formated
// Set initial values to be formatted
value = raw_value;

// Calculate vector size
@@ -398,7 +398,7 @@ void SanitizeStick(Common::Input::AnalogStatus& analog_x, Common::Input::AnalogS
raw_x = properties_x.inverted ? -raw_x : raw_x;
raw_y = properties_y.inverted ? -raw_y : raw_y;

// Set initial values to be formated
// Set initial values to be formatted
x = raw_x;
y = raw_y;
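Aside: the SanitizeAnalog/SanitizeStick hunks above subtract a calibration offset and then clamp the result into a usable range. A compact standalone sketch of that kind of sanitization, with hypothetical property names rather than the real Common::Input structs:

    #include <algorithm>
    #include <cmath>

    struct AnalogProperties {
        float deadzone = 0.1f; // values inside this radius are treated as zero
        float range = 0.95f;   // values beyond this radius saturate to 1.0
        float offset = 0.0f;   // calibration offset of the stick's resting position
    };

    // Returns the sanitized value in [-1, 1]: centered, deadzoned, rescaled, clamped.
    float SanitizeAnalog(float raw_value, const AnalogProperties& p) {
        float value = raw_value - p.offset; // apply center offset
        if (std::abs(value) < p.deadzone) {
            return 0.0f;                    // inside the deadzone
        }
        const float sign = value < 0.0f ? -1.0f : 1.0f;
        value = (std::abs(value) - p.deadzone) / (p.range - p.deadzone);
        return sign * std::clamp(value, 0.0f, 1.0f); // clamp to the unit range
    }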
@@ -84,7 +84,7 @@ private:
// Gyroscope vector measurement in radians/s.
Common::Vec3f gyro;

// Vector to be substracted from gyro measurements
// Vector to be subtracted from gyro measurements
Common::Vec3f gyro_bias;

// Minimum gyro amplitude to detect if the device is moving
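Aside: gyro_bias above is the calibration vector removed from every gyroscope sample; a trivial sketch of that correction with a hypothetical Vec3 type:

    struct Vec3f {
        float x, y, z;
    };

    // Subtract the stored bias so a resting controller reads (close to) zero angular rate.
    Vec3f ApplyGyroBias(const Vec3f& sample, const Vec3f& bias) {
        return {sample.x - bias.x, sample.y - bias.y, sample.z - bias.z};
    }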
@@ -12,20 +12,19 @@

namespace Kernel {

GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel_)
: kernel{kernel_}, scheduler_lock{kernel_} {}
GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
: m_kernel{kernel}, m_scheduler_lock{kernel} {}

GlobalSchedulerContext::~GlobalSchedulerContext() = default;

void GlobalSchedulerContext::AddThread(KThread* thread) {
std::scoped_lock lock{global_list_guard};
thread_list.push_back(thread);
std::scoped_lock lock{m_global_list_guard};
m_thread_list.push_back(thread);
}

void GlobalSchedulerContext::RemoveThread(KThread* thread) {
std::scoped_lock lock{global_list_guard};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
std::scoped_lock lock{m_global_list_guard};
std::erase(m_thread_list, thread);
}

void GlobalSchedulerContext::PreemptThreads() {
@@ -38,37 +37,37 @@ void GlobalSchedulerContext::PreemptThreads() {
63,
};

ASSERT(IsLocked());
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
KScheduler::RotateScheduledQueue(kernel, core_id, priority);
KScheduler::RotateScheduledQueue(m_kernel, core_id, priority);
}
}

bool GlobalSchedulerContext::IsLocked() const {
return scheduler_lock.IsLockedByCurrentThread();
return m_scheduler_lock.IsLockedByCurrentThread();
}

void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
ASSERT(IsLocked());
ASSERT(this->IsLocked());

woken_dummy_threads.insert(thread);
m_woken_dummy_threads.insert(thread);
}

void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
ASSERT(IsLocked());
ASSERT(this->IsLocked());

woken_dummy_threads.erase(thread);
m_woken_dummy_threads.erase(thread);
}

void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
ASSERT(IsLocked());
ASSERT(this->IsLocked());

for (auto* thread : woken_dummy_threads) {
for (auto* thread : m_woken_dummy_threads) {
thread->DummyThreadEndWait();
}

woken_dummy_threads.clear();
m_woken_dummy_threads.clear();
}

} // namespace Kernel

@@ -33,7 +33,7 @@ class GlobalSchedulerContext final {
public:
using LockType = KAbstractSchedulerLock<KScheduler>;

explicit GlobalSchedulerContext(KernelCore& kernel_);
explicit GlobalSchedulerContext(KernelCore& kernel);
~GlobalSchedulerContext();

/// Adds a new thread to the scheduler
@@ -43,8 +43,9 @@ public:
void RemoveThread(KThread* thread);

/// Returns a list of all threads managed by the scheduler
[[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
return thread_list;
/// This is only safe to iterate while holding the scheduler lock
const std::vector<KThread*>& GetThreadList() const {
return m_thread_list;
}

/**
@@ -63,30 +64,26 @@ public:
void RegisterDummyThreadForWakeup(KThread* thread);
void WakeupWaitingDummyThreads();

[[nodiscard]] LockType& SchedulerLock() {
return scheduler_lock;
}

[[nodiscard]] const LockType& SchedulerLock() const {
return scheduler_lock;
LockType& SchedulerLock() {
return m_scheduler_lock;
}

private:
friend class KScopedSchedulerLock;
friend class KScopedSchedulerLockAndSleep;

KernelCore& kernel;
KernelCore& m_kernel;

std::atomic_bool scheduler_update_needed{};
KSchedulerPriorityQueue priority_queue;
LockType scheduler_lock;
std::atomic_bool m_scheduler_update_needed{};
KSchedulerPriorityQueue m_priority_queue;
LockType m_scheduler_lock;

/// Lists dummy threads pending wakeup on lock release
std::set<KThread*> woken_dummy_threads;
std::set<KThread*> m_woken_dummy_threads;

/// Lists all thread ids that aren't deleted/etc.
std::vector<KThread*> thread_list;
std::mutex global_list_guard;
std::vector<KThread*> m_thread_list;
std::mutex m_global_list_guard;
};

} // namespace Kernel
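Aside: the AddThread/RemoveThread hunks above guard a plain std::vector registry with a mutex and switch removal to C++20 std::erase. The same pattern in isolation, as a generic sketch rather than the kernel's real types:

    #include <mutex>
    #include <vector>

    class ThreadRegistry {
    public:
        void Add(int* thread) {
            std::scoped_lock lock{m_guard};
            m_threads.push_back(thread);
        }

        void Remove(int* thread) {
            std::scoped_lock lock{m_guard};
            std::erase(m_threads, thread); // C++20: removes all matching elements in one call
        }

    private:
        std::vector<int*> m_threads; // int* stands in for a thread handle type
        std::mutex m_guard;
    };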
@@ -14,8 +14,8 @@

namespace Kernel {

KAddressArbiter::KAddressArbiter(Core::System& system_)
: system{system_}, kernel{system.Kernel()} {}
KAddressArbiter::KAddressArbiter(Core::System& system)
: m_system{system}, m_kernel{system.Kernel()} {}
KAddressArbiter::~KAddressArbiter() = default;

namespace {
@@ -90,8 +90,8 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32

class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
public:
explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel, KAddressArbiter::ThreadTree* t)
: KThreadQueue(kernel), m_tree(t) {}

void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// If the thread is waiting on an address arbiter, remove it from the tree.
@@ -105,7 +105,7 @@ public:
}

private:
KAddressArbiter::ThreadTree* m_tree;
KAddressArbiter::ThreadTree* m_tree{};
};

} // namespace
@@ -114,10 +114,10 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

auto it = thread_tree.nfind_key({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
auto it = m_tree.nfind_key({addr, -1});
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -126,31 +126,27 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = thread_tree.erase(it);
it = m_tree.erase(it);
++num_waiters;
}
}
return ResultSuccess;
R_SUCCEED();
}

Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Check the userspace value.
s32 user_value{};
if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
LOG_ERROR(Kernel, "Invalid current memory!");
return ResultInvalidCurrentMemory;
}
if (user_value != value) {
return ResultInvalidState;
}
R_UNLESS(UpdateIfEqual(m_system, std::addressof(user_value), addr, value, value + 1),
ResultInvalidCurrentMemory);
R_UNLESS(user_value == value, ResultInvalidState);

auto it = thread_tree.nfind_key({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
auto it = m_tree.nfind_key({addr, -1});
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -159,33 +155,33 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = thread_tree.erase(it);
it = m_tree.erase(it);
++num_waiters;
}
}
return ResultSuccess;
R_SUCCEED();
}

Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
[[maybe_unused]] const KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

auto it = thread_tree.nfind_key({addr, -1});
auto it = m_tree.nfind_key({addr, -1});
// Determine the updated value.
s32 new_value{};
if (count <= 0) {
if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
new_value = value - 2;
} else {
new_value = value + 1;
}
} else {
if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
auto tmp_it = it;
s32 tmp_num_waiters{};
while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
while (++tmp_it != m_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
if (tmp_num_waiters++ >= count) {
break;
}
@@ -205,20 +201,15 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
s32 user_value{};
bool succeeded{};
if (value != new_value) {
succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value);
succeeded = UpdateIfEqual(m_system, std::addressof(user_value), addr, value, new_value);
} else {
succeeded = ReadFromUser(system, &user_value, addr);
succeeded = ReadFromUser(m_system, std::addressof(user_value), addr);
}

if (!succeeded) {
LOG_ERROR(Kernel, "Invalid current memory!");
return ResultInvalidCurrentMemory;
}
if (user_value != value) {
return ResultInvalidState;
}
R_UNLESS(succeeded, ResultInvalidCurrentMemory);
R_UNLESS(user_value == value, ResultInvalidState);

while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -227,58 +218,60 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = thread_tree.erase(it);
it = m_tree.erase(it);
++num_waiters;
}
}
return ResultSuccess;
R_SUCCEED();
}

Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};

// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
R_THROW(ResultTerminationRequested);
}

// Read the value from userspace.
s32 user_value{};
bool succeeded{};
if (decrement) {
succeeded = DecrementIfLessThan(system, &user_value, addr, value);
succeeded = DecrementIfLessThan(m_system, std::addressof(user_value), addr, value);
} else {
succeeded = ReadFromUser(system, &user_value, addr);
succeeded = ReadFromUser(m_system, std::addressof(user_value), addr);
}

if (!succeeded) {
slp.CancelSleep();
return ResultInvalidCurrentMemory;
R_THROW(ResultInvalidCurrentMemory);
}

// Check that the value is less than the specified one.
if (user_value >= value) {
slp.CancelSleep();
return ResultInvalidState;
R_THROW(ResultInvalidState);
}

// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
return ResultTimedOut;
R_THROW(ResultTimedOut);
}

// Set the arbiter.
cur_thread->SetAddressArbiter(&thread_tree, addr);
thread_tree.insert(*cur_thread);
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);

// Wait for the thread to finish.
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
}
@@ -289,42 +282,44 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6

Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};

// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
R_THROW(ResultTerminationRequested);
}

// Read the value from userspace.
s32 user_value{};
if (!ReadFromUser(system, &user_value, addr)) {
if (!ReadFromUser(m_system, std::addressof(user_value), addr)) {
slp.CancelSleep();
return ResultInvalidCurrentMemory;
R_THROW(ResultInvalidCurrentMemory);
}

// Check that the value is equal.
if (value != user_value) {
slp.CancelSleep();
return ResultInvalidState;
R_THROW(ResultInvalidState);
}

// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
return ResultTimedOut;
R_THROW(ResultTimedOut);
}

// Set the arbiter.
cur_thread->SetAddressArbiter(&thread_tree, addr);
thread_tree.insert(*cur_thread);
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);

// Wait for the thread to finish.
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
}

@@ -22,47 +22,46 @@ class KAddressArbiter {
public:
using ThreadTree = KConditionVariable::ThreadTree;

explicit KAddressArbiter(Core::System& system_);
explicit KAddressArbiter(Core::System& system);
~KAddressArbiter();

[[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
switch (type) {
case Svc::SignalType::Signal:
return Signal(addr, count);
R_RETURN(this->Signal(addr, count));
case Svc::SignalType::SignalAndIncrementIfEqual:
return SignalAndIncrementIfEqual(addr, value, count);
R_RETURN(this->SignalAndIncrementIfEqual(addr, value, count));
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
R_RETURN(this->SignalAndModifyByWaitingCountIfEqual(addr, value, count));
default:
UNREACHABLE();
}
ASSERT(false);
return ResultUnknown;
}

[[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
s64 timeout) {
Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
return WaitIfLessThan(addr, value, false, timeout);
R_RETURN(WaitIfLessThan(addr, value, false, timeout));
case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
return WaitIfLessThan(addr, value, true, timeout);
R_RETURN(WaitIfLessThan(addr, value, true, timeout));
case Svc::ArbitrationType::WaitIfEqual:
return WaitIfEqual(addr, value, timeout);
R_RETURN(WaitIfEqual(addr, value, timeout));
default:
UNREACHABLE();
}
ASSERT(false);
return ResultUnknown;
}

private:
[[nodiscard]] Result Signal(VAddr addr, s32 count);
[[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
[[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
[[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
[[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
Result Signal(VAddr addr, s32 count);
Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);

ThreadTree thread_tree;

Core::System& system;
KernelCore& kernel;
private:
ThreadTree m_tree;
Core::System& m_system;
KernelCore& m_kernel;
};

} // namespace Kernel
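Aside: the hunks above replace explicit if/return error paths with the R_UNLESS/R_THROW/R_SUCCEED/R_RETURN result macros. A simplified illustration of what such macros can expand to; these are not yuzu's exact definitions, which also hook into ON_RESULT_FAILURE handling:

    // Hypothetical, simplified expansions for illustration only.
    #define R_SUCCEED()    return ResultSuccess
    #define R_THROW(res)   return (res)
    #define R_RETURN(expr) return (expr)
    #define R_UNLESS(cond, res) \
        do {                    \
            if (!(cond)) {      \
                R_THROW(res);   \
            }                   \
        } while (0)

    // Usage mirroring SignalAndIncrementIfEqual above:
    //   R_UNLESS(UpdateIfEqual(...), ResultInvalidCurrentMemory);
    //   R_UNLESS(user_value == value, ResultInvalidState);
    //   ...
    //   R_SUCCEED();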
@@ -44,11 +44,11 @@ const KAddressSpaceInfo& GetAddressSpaceInfo(size_t width, KAddressSpaceInfo::Ty

} // namespace

uintptr_t KAddressSpaceInfo::GetAddressSpaceStart(size_t width, KAddressSpaceInfo::Type type) {
std::size_t KAddressSpaceInfo::GetAddressSpaceStart(size_t width, KAddressSpaceInfo::Type type) {
return GetAddressSpaceInfo(width, type).address;
}

size_t KAddressSpaceInfo::GetAddressSpaceSize(size_t width, KAddressSpaceInfo::Type type) {
std::size_t KAddressSpaceInfo::GetAddressSpaceSize(size_t width, KAddressSpaceInfo::Type type) {
return GetAddressSpaceInfo(width, type).size;
}

@@ -18,7 +18,7 @@ struct KAddressSpaceInfo final {
Count,
};

static u64 GetAddressSpaceStart(std::size_t width, Type type);
static std::size_t GetAddressSpaceStart(std::size_t width, Type type);
static std::size_t GetAddressSpaceSize(std::size_t width, Type type);

const std::size_t bit_width{};

@@ -13,40 +13,40 @@ class KAffinityMask {
public:
constexpr KAffinityMask() = default;

[[nodiscard]] constexpr u64 GetAffinityMask() const {
return this->mask;
constexpr u64 GetAffinityMask() const {
return m_mask;
}

constexpr void SetAffinityMask(u64 new_mask) {
ASSERT((new_mask & ~AllowedAffinityMask) == 0);
this->mask = new_mask;
m_mask = new_mask;
}

[[nodiscard]] constexpr bool GetAffinity(s32 core) const {
return (this->mask & GetCoreBit(core)) != 0;
constexpr bool GetAffinity(s32 core) const {
return (m_mask & GetCoreBit(core)) != 0;
}

constexpr void SetAffinity(s32 core, bool set) {
if (set) {
this->mask |= GetCoreBit(core);
m_mask |= GetCoreBit(core);
} else {
this->mask &= ~GetCoreBit(core);
m_mask &= ~GetCoreBit(core);
}
}

constexpr void SetAll() {
this->mask = AllowedAffinityMask;
m_mask = AllowedAffinityMask;
}

private:
[[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
static constexpr u64 GetCoreBit(s32 core) {
ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
return (1ULL << core);
}

static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;

u64 mask{};
u64 m_mask{};
};

} // namespace Kernel
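Aside: a short usage sketch for the affinity-mask helper above, pinning a thread to cores 0 and 2 (assuming four emulated cores; this fragment presumes the class from the hunk is in scope):

    // KAffinityMask holds one bit per emulated CPU core.
    Kernel::KAffinityMask mask{};
    mask.SetAffinity(0, true); // allow core 0
    mask.SetAffinity(2, true); // allow core 2

    const bool can_run_on_core1 = mask.GetAffinity(1); // false
    const u64 raw_bits = mask.GetAffinityMask();       // 0b0101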
@@ -12,11 +12,11 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) {
}

void KAutoObject::RegisterWithKernel() {
kernel.RegisterKernelObject(this);
m_kernel.RegisterKernelObject(this);
}

void KAutoObject::UnregisterWithKernel() {
kernel.UnregisterKernelObject(this);
m_kernel.UnregisterKernelObject(this);
}

} // namespace Kernel

@@ -80,7 +80,7 @@ private:
KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);

public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) {
RegisterWithKernel();
}
virtual ~KAutoObject() = default;
@@ -164,17 +164,12 @@ public:
}
}

const std::string& GetName() const {
return name;
}

private:
void RegisterWithKernel();
void UnregisterWithKernel();

protected:
KernelCore& kernel;
std::string name;
KernelCore& m_kernel;

private:
std::atomic<u32> m_ref_count{};
@@ -184,7 +179,7 @@ class KAutoObjectWithListContainer;

class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> {
public:
explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {}
explicit KAutoObjectWithList(KernelCore& kernel) : KAutoObject(kernel) {}

static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
const u64 lid = lhs.GetId();
@@ -200,7 +195,7 @@ public:
}

friend bool operator<(const KAutoObjectWithList& left, const KAutoObjectWithList& right) {
return &left < &right;
return KAutoObjectWithList::Compare(left, right) < 0;
}

public:
@@ -208,10 +203,6 @@ public:
return reinterpret_cast<u64>(this);
}

virtual const std::string& GetName() const {
return name;
}

private:
friend class KAutoObjectWithListContainer;
};

@@ -11,7 +11,7 @@

namespace Kernel {

Result KCapabilities::InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table) {
Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
// We're initializing an initial process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();

@@ -22,7 +22,7 @@ class KCapabilities {
public:
constexpr explicit KCapabilities() = default;

Result InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table);
Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);

static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);

@@ -11,26 +11,21 @@

namespace Kernel {

KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
KClientPort::~KClientPort() = default;

void KClientPort::Initialize(KPort* parent_port_, s32 max_sessions_, std::string&& name_) {
void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
// Set member variables.
num_sessions = 0;
peak_sessions = 0;
parent = parent_port_;
max_sessions = max_sessions_;
name = std::move(name_);
m_num_sessions = 0;
m_peak_sessions = 0;
m_parent = parent;
m_max_sessions = max_sessions;
}

void KClientPort::OnSessionFinalized() {
KScopedSchedulerLock sl{kernel};
KScopedSchedulerLock sl{m_kernel};

// This might happen if a session was improperly used with this port.
ASSERT_MSG(num_sessions > 0, "num_sessions is invalid");

const auto prev = num_sessions--;
if (prev == max_sessions) {
if (const auto prev = m_num_sessions--; prev == m_max_sessions) {
this->NotifyAvailable();
}
}
@@ -47,81 +42,81 @@ bool KClientPort::IsServerClosed() const {

void KClientPort::Destroy() {
// Note with our parent that we're closed.
parent->OnClientClosed();
m_parent->OnClientClosed();

// Close our reference to our parent.
parent->Close();
m_parent->Close();
}

bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
return m_num_sessions.load() < m_max_sessions;
}

Result KClientPort::CreateSession(KClientSession** out) {
// Declare the session we're going to allocate.
KSession* session{};

// Reserve a new session from the resource limit.
//! FIXME: we are reserving this from the wrong resource limit!
KScopedResourceReservation session_reservation(kernel.ApplicationProcess()->GetResourceLimit(),
LimitableResource::SessionCountMax);
KScopedResourceReservation session_reservation(
m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);

// Allocate a session normally.
session = KSession::Create(m_kernel);

// Check that we successfully created a session.
R_UNLESS(session != nullptr, ResultOutOfResource);

// Update the session counts.
{
ON_RESULT_FAILURE {
session->Close();
};

// Atomically increment the number of sessions.
s32 new_sessions{};
{
const auto max = max_sessions;
auto cur_sessions = num_sessions.load(std::memory_order_acquire);
const auto max = m_max_sessions;
auto cur_sessions = m_num_sessions.load(std::memory_order_acquire);
do {
R_UNLESS(cur_sessions < max, ResultOutOfSessions);
new_sessions = cur_sessions + 1;
} while (!num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
std::memory_order_relaxed));
} while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
std::memory_order_relaxed));
}

// Atomically update the peak session tracking.
{
auto peak = peak_sessions.load(std::memory_order_acquire);
auto peak = m_peak_sessions.load(std::memory_order_acquire);
do {
if (peak >= new_sessions) {
break;
}
} while (!peak_sessions.compare_exchange_weak(peak, new_sessions,
std::memory_order_relaxed));
} while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions,
std::memory_order_relaxed));
}
}

// Create a new session.
KSession* session = KSession::Create(kernel);
if (session == nullptr) {
// Decrement the session count.
const auto prev = num_sessions--;
if (prev == max_sessions) {
this->NotifyAvailable();
}

return ResultOutOfResource;
}

// Initialize the session.
session->Initialize(this, parent->GetName());
session->Initialize(this, m_parent->GetName());

// Commit the session reservation.
session_reservation.Commit();

// Register the session.
KSession::Register(kernel, session);
auto session_guard = SCOPE_GUARD({
KSession::Register(m_kernel, session);
ON_RESULT_FAILURE {
session->GetClientSession().Close();
session->GetServerSession().Close();
});
};

// Enqueue the session with our parent.
R_TRY(parent->EnqueueSession(std::addressof(session->GetServerSession())));
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));

// We succeeded, so set the output.
session_guard.Cancel();
*out = std::addressof(session->GetClientSession());
return ResultSuccess;
R_SUCCEED();
}

} // namespace Kernel

@@ -4,7 +4,6 @@
#pragma once

#include <memory>
#include <string>

#include "common/common_types.h"
#include "core/hle/kernel/k_synchronization_object.h"
@@ -20,28 +19,28 @@ class KClientPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);

public:
explicit KClientPort(KernelCore& kernel_);
explicit KClientPort(KernelCore& kernel);
~KClientPort() override;

void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_);
void Initialize(KPort* parent, s32 max_sessions);
void OnSessionFinalized();
void OnServerClosed();

const KPort* GetParent() const {
return parent;
return m_parent;
}
KPort* GetParent() {
return parent;
return m_parent;
}

s32 GetNumSessions() const {
return num_sessions;
return m_num_sessions;
}
s32 GetPeakSessions() const {
return peak_sessions;
return m_peak_sessions;
}
s32 GetMaxSessions() const {
return max_sessions;
return m_max_sessions;
}

bool IsLight() const;
@@ -54,10 +53,10 @@ public:
Result CreateSession(KClientSession** out);

private:
std::atomic<s32> num_sessions{};
std::atomic<s32> peak_sessions{};
s32 max_sessions{};
KPort* parent{};
std::atomic<s32> m_num_sessions{};
std::atomic<s32> m_peak_sessions{};
s32 m_max_sessions{};
KPort* m_parent{};
};

} // namespace Kernel
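Aside: CreateSession above bumps the session counters with an acquire load plus compare_exchange_weak loop rather than a plain fetch_add, because the increment must fail once the configured maximum is reached. The same idiom in isolation:

    #include <atomic>

    // Returns true and increments the counter only while it is still below max.
    bool TryIncrementUpTo(std::atomic<int>& counter, int max) {
        int current = counter.load(std::memory_order_acquire);
        do {
            if (current >= max) {
                return false; // would exceed the limit; leave the counter untouched
            }
            // On failure, compare_exchange_weak reloads `current` and the loop retries.
        } while (!counter.compare_exchange_weak(current, current + 1, std::memory_order_relaxed));
        return true;
    }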
@@ -12,28 +12,27 @@ namespace Kernel {

static constexpr u32 MessageBufferSize = 0x100;

KClientSession::KClientSession(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_} {}
KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KClientSession::~KClientSession() = default;

void KClientSession::Destroy() {
parent->OnClientClosed();
parent->Close();
m_parent->OnClientClosed();
m_parent->Close();
}

void KClientSession::OnServerClosed() {}

Result KClientSession::SendSyncRequest() {
// Create a session request.
KSessionRequest* request = KSessionRequest::Create(kernel);
KSessionRequest* request = KSessionRequest::Create(m_kernel);
R_UNLESS(request != nullptr, ResultOutOfResource);
SCOPE_EXIT({ request->Close(); });

// Initialize the request.
request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTlsAddress(), MessageBufferSize);

// Send the request.
return parent->GetServerSession().OnRequest(request);
R_RETURN(m_parent->GetServerSession().OnRequest(request));
}

} // namespace Kernel

@@ -30,20 +30,19 @@ class KClientSession final
KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);

public:
explicit KClientSession(KernelCore& kernel_);
explicit KClientSession(KernelCore& kernel);
~KClientSession() override;

void Initialize(KSession* parent_session_, std::string&& name_) {
void Initialize(KSession* parent) {
// Set member variables.
parent = parent_session_;
name = std::move(name_);
m_parent = parent;
}

void Destroy() override;
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
static void PostDestroy(uintptr_t arg) {}

KSession* GetParent() const {
return parent;
return m_parent;
}

Result SendSyncRequest();
@@ -51,7 +50,7 @@ public:
void OnServerClosed();

private:
KSession* parent{};
KSession* m_parent{};
};

} // namespace Kernel
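Aside: SendSyncRequest above relies on SCOPE_EXIT to close the request on every return path. A minimal generic scope-guard sketch of that idea; yuzu's real SCOPE_EXIT and ON_RESULT_FAILURE macros are more elaborate:

    #include <utility>

    // Runs the stored callable when the guard goes out of scope.
    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& f) : m_func{std::forward<F>(f)} {}
        ~ScopeExit() {
            m_func();
        }
        ScopeExit(const ScopeExit&) = delete;
        ScopeExit& operator=(const ScopeExit&) = delete;

    private:
        F m_func;
    };

    // Usage: the cleanup runs no matter which return path is taken.
    //   auto cleanup = ScopeExit{[&] { request->Close(); }};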
@@ -16,18 +16,18 @@

namespace Kernel {

KCodeMemory::KCodeMemory(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
KCodeMemory::KCodeMemory(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}

Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
// Set members.
m_owner = GetCurrentProcessPointer(kernel);
m_owner = GetCurrentProcessPointer(m_kernel);

// Get the owner page table.
auto& page_table = m_owner->PageTable();

// Construct the page group.
m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());

// Lock the memory.
R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
@@ -45,7 +45,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
m_is_mapped = false;

// We succeeded.
return ResultSuccess;
R_SUCCEED();
}

void KCodeMemory::Finalize() {
@@ -74,13 +74,13 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
R_UNLESS(!m_is_mapped, ResultInvalidState);

// Map the memory.
R_TRY(GetCurrentProcess(kernel).PageTable().MapPageGroup(
R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup(
address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

// Mark ourselves as mapped.
m_is_mapped = true;

return ResultSuccess;
R_SUCCEED();
}

Result KCodeMemory::Unmap(VAddr address, size_t size) {
@@ -91,13 +91,13 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
KScopedLightLock lk(m_lock);

// Unmap the memory.
R_TRY(GetCurrentProcess(kernel).PageTable().UnmapPageGroup(address, *m_page_group,
KMemoryState::CodeOut));
R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group,
KMemoryState::CodeOut));

// Mark ourselves as unmapped.
m_is_mapped = false;

return ResultSuccess;
R_SUCCEED();
}

Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
@@ -131,7 +131,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
// Mark ourselves as mapped.
m_is_owner_mapped = true;

return ResultSuccess;
R_SUCCEED();
}

Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
@@ -147,7 +147,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
// Mark ourselves as unmapped.
m_is_owner_mapped = false;

return ResultSuccess;
R_SUCCEED();
}

} // namespace Kernel

@@ -29,7 +29,7 @@ class KCodeMemory final
KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);

public:
explicit KCodeMemory(KernelCore& kernel_);
explicit KCodeMemory(KernelCore& kernel);

Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
void Finalize() override;
@@ -42,7 +42,7 @@ public:
bool IsInitialized() const override {
return m_is_initialized;
}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
static void PostDestroy(uintptr_t arg) {}

KProcess* GetOwner() const override {
return m_owner;

@@ -4,7 +4,6 @@
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_linked_list.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
@@ -58,8 +57,8 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero

class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
public:
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
: KThreadQueue(kernel_) {}
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel)
: KThreadQueue(kernel) {}

void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
@@ -76,8 +75,8 @@ private:

public:
explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
KernelCore& kernel_, KConditionVariable::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
KernelCore& kernel, KConditionVariable::ThreadTree* t)
: KThreadQueue(kernel), m_tree(t) {}

void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
@@ -98,17 +97,17 @@ public:

} // namespace

KConditionVariable::KConditionVariable(Core::System& system_)
: system{system_}, kernel{system.Kernel()} {}
KConditionVariable::KConditionVariable(Core::System& system)
: m_system{system}, m_kernel{system.Kernel()} {}

KConditionVariable::~KConditionVariable() = default;

Result KConditionVariable::SignalToAddress(VAddr addr) {
KThread* owner_thread = GetCurrentThreadPointer(kernel);
KThread* owner_thread = GetCurrentThreadPointer(m_kernel);

// Signal the address.
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Remove waiter thread.
bool has_waiters{};
@@ -129,7 +128,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {

// Write the value to userspace.
Result result{ResultSuccess};
if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
if (WriteToUser(m_system, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
result = ResultInvalidCurrentMemory;
@@ -145,26 +144,27 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
}

Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);

// Wait for the address.
KThread* owner_thread{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);

// Read the tag from userspace.
u32 test_tag{};
R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
R_UNLESS(ReadFromUser(m_system, std::addressof(test_tag), addr),
ResultInvalidCurrentMemory);

// If the tag isn't the handle (with wait mask), we're done.
R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));

// Get the lock owner thread.
owner_thread = GetCurrentProcess(kernel)
owner_thread = GetCurrentProcess(m_kernel)
.GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(handle)
.ReleasePointerUnsafe();
@@ -177,19 +177,18 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
// Begin waiting.
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
cur_thread->SetMutexWaitAddressForDebugging(addr);
}

// Close our reference to the owner thread, now that the wait is over.
owner_thread->Close();

// Get the wait result.
return cur_thread->GetWaitResult();
R_RETURN(cur_thread->GetWaitResult());
}

void KConditionVariable::SignalImpl(KThread* thread) {
// Check pre-conditions.
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

// Update the tag.
VAddr address = thread->GetAddressKey();
@@ -204,7 +203,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
// TODO(bunnei): We should call CanAccessAtomic(..) here.
can_access = true;
if (can_access) [[likely]] {
UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
UpdateLockAtomic(m_system, std::addressof(prev_tag), address, own_tag,
Svc::HandleWaitMask);
}
}
@@ -215,7 +214,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
thread->EndWait(ResultSuccess);
} else {
// Get the previous owner.
KThread* owner_thread = GetCurrentProcess(kernel)
KThread* owner_thread = GetCurrentProcess(m_kernel)
.GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(
static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
@@ -240,14 +239,14 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

auto it = thread_tree.nfind_key({cv_key, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
auto it = m_tree.nfind_key({cv_key, -1});
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetConditionVariableKey() == cv_key)) {
KThread* target_thread = std::addressof(*it);

it = thread_tree.erase(it);
it = m_tree.erase(it);
target_thread->ClearConditionVariable();

this->SignalImpl(target_thread);
@@ -256,26 +255,27 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
}

// If we have no waiters, clear the has waiter flag.
if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) {
const u32 has_waiter_flag{};
WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
WriteToUser(m_system, cv_key, std::addressof(has_waiter_flag));
}
}
}

Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
kernel, std::addressof(thread_tree));
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(m_kernel,
std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), cur_thread, timeout);

// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
R_THROW(ResultTerminationRequested);
}

// Update the value and process for the next owner.
@@ -301,14 +301,14 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Write to the cv key.
{
const u32 has_waiter_flag = 1;
WriteToUser(system, key, std::addressof(has_waiter_flag));
// TODO(bunnei): We should call DataMemoryBarrier(..) here.
WriteToUser(m_system, key, std::addressof(has_waiter_flag));
std::atomic_thread_fence(std::memory_order_seq_cst);
}

// Write the value to userspace.
if (!WriteToUser(system, addr, std::addressof(next_value))) {
if (!WriteToUser(m_system, addr, std::addressof(next_value))) {
slp.CancelSleep();
return ResultInvalidCurrentMemory;
R_THROW(ResultInvalidCurrentMemory);
}
}

@@ -316,17 +316,17 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
R_UNLESS(timeout != 0, ResultTimedOut);

// Update condition variable tracking.
cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
thread_tree.insert(*cur_thread);
cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value);
m_tree.insert(*cur_thread);

// Begin waiting.
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
|
||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
|
||||
cur_thread->SetMutexWaitAddressForDebugging(addr);
|
||||
}
|
||||
|
||||
// Get the wait result.
|
||||
return cur_thread->GetWaitResult();
|
||||
R_RETURN(cur_thread->GetWaitResult());
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
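Note: the Wait/Signal pair above follows the familiar timed condition-variable shape: sleep under a lock, then either time out or be woken by a signaler. As a rough user-space analogue only (not the kernel code, which sleeps via KScopedSchedulerLockAndSleep and a KHardwareTimer), the same flow with the standard library looks like this; every name and the 100 ms timeout are illustrative.

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    int main() {
        std::mutex m;
        std::condition_variable cv;
        bool signaled = false;

        std::thread waiter([&] {
            std::unique_lock lk(m);
            // Returns false if the 100 ms timeout expires first
            // (the kernel path returns ResultTimedOut instead).
            cv.wait_for(lk, std::chrono::milliseconds(100), [&] { return signaled; });
        });

        {
            std::lock_guard lk(m);
            signaled = true;
        }
        cv.notify_one(); // corresponds to Signal() waking one waiter

        waiter.join();
    }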
@@ -21,36 +21,36 @@ class KConditionVariable {
public:
using ThreadTree = typename KThread::ConditionVariableThreadTreeType;

explicit KConditionVariable(Core::System& system_);
explicit KConditionVariable(Core::System& system);
~KConditionVariable();

// Arbitration
[[nodiscard]] Result SignalToAddress(VAddr addr);
[[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);
Result SignalToAddress(VAddr addr);
Result WaitForAddress(Handle handle, VAddr addr, u32 value);

// Condition variable
void Signal(u64 cv_key, s32 count);
[[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
void SignalImpl(KThread* thread);

ThreadTree thread_tree;

Core::System& system;
KernelCore& kernel;
private:
Core::System& m_system;
KernelCore& m_kernel;
ThreadTree m_tree{};
};

inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
inline void BeforeUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));

tree->erase(tree->iterator_to(*thread));
}

inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
inline void AfterUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));

tree->insert(*thread);
}
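Note: BeforeUpdatePriority/AfterUpdatePriority above show the standard rule for ordered containers: remove an element before its ordering key changes and re-insert it afterwards. A small standard-library analogue (not the kernel's intrusive thread tree; the Waiter struct and values are made up) illustrates the same erase/modify/insert sequence.

    #include <cstdio>
    #include <set>

    struct Waiter {
        int priority;
        int id;
    };

    struct ByPriority {
        bool operator()(const Waiter& a, const Waiter& b) const {
            return a.priority < b.priority;
        }
    };

    int main() {
        std::multiset<Waiter, ByPriority> tree{{3, 0}, {5, 1}, {7, 2}};

        // "BeforeUpdatePriority": take the element out while its key is still valid.
        auto it = tree.begin();
        Waiter w = *it;
        tree.erase(it);

        // Change the key, then "AfterUpdatePriority": re-insert in sorted position.
        w.priority = 9;
        tree.insert(w);

        for (const Waiter& e : tree) {
            std::printf("prio=%d id=%d\n", e.priority, e.id);
        }
    }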
@@ -12,9 +12,9 @@ class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObj
|
||||
KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
|
||||
|
||||
public:
|
||||
explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
|
||||
explicit KDebug(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
|
||||
|
||||
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
|
||||
static void PostDestroy(uintptr_t arg) {}
|
||||
};
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -9,8 +9,8 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_)
|
||||
: KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {}
|
||||
KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel)
|
||||
: KAutoObjectWithSlabHeapAndContainer(kernel), m_lock(kernel), m_is_initialized(false) {}
|
||||
KDeviceAddressSpace::~KDeviceAddressSpace() = default;
|
||||
|
||||
void KDeviceAddressSpace::Initialize() {
|
||||
|
||||
@@ -21,9 +21,9 @@ public:
|
||||
~KDeviceAddressSpace();
|
||||
|
||||
Result Initialize(u64 address, u64 size);
|
||||
void Finalize();
|
||||
void Finalize() override;
|
||||
|
||||
bool IsInitialized() const {
|
||||
bool IsInitialized() const override {
|
||||
return m_is_initialized;
|
||||
}
|
||||
static void PostDestroy(uintptr_t arg) {}
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
KEvent::KEvent(KernelCore& kernel_)
|
||||
: KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {}
|
||||
KEvent::KEvent(KernelCore& kernel)
|
||||
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_readable_event{kernel} {}
|
||||
|
||||
KEvent::~KEvent() = default;
|
||||
|
||||
@@ -36,7 +36,7 @@ void KEvent::Finalize() {
|
||||
}
|
||||
|
||||
Result KEvent::Signal() {
|
||||
KScopedSchedulerLock sl{kernel};
|
||||
KScopedSchedulerLock sl{m_kernel};
|
||||
|
||||
R_SUCCEED_IF(m_readable_event_destroyed);
|
||||
|
||||
@@ -44,7 +44,7 @@ Result KEvent::Signal() {
|
||||
}
|
||||
|
||||
Result KEvent::Clear() {
|
||||
KScopedSchedulerLock sl{kernel};
|
||||
KScopedSchedulerLock sl{m_kernel};
|
||||
|
||||
R_SUCCEED_IF(m_readable_event_destroyed);
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj
|
||||
KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
|
||||
|
||||
public:
|
||||
explicit KEvent(KernelCore& kernel_);
|
||||
explicit KEvent(KernelCore& kernel);
|
||||
~KEvent() override;
|
||||
|
||||
void Initialize(KProcess* owner);
|
||||
|
||||
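Note: the KDeviceAddressSpace hunks above add the override specifier to Finalize() and IsInitialized(), so the compiler checks that they really override base-class virtuals. A minimal standalone illustration of that effect (the class names here are made up, not the project's hierarchy):

    // If the signature drifts from the base class, `override` turns a silent
    // non-override into a compile error.
    struct BaseObject {
        virtual ~BaseObject() = default;
        virtual void Finalize() {}
        virtual bool IsInitialized() const {
            return false;
        }
    };

    struct DerivedObject final : BaseObject {
        void Finalize() override {}
        bool IsInitialized() const override {
            return m_is_initialized;
        }

        bool m_is_initialized = true;
    };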
@@ -13,9 +13,9 @@ namespace {

class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
public:
ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
ThreadQueueImplForKLightConditionVariable(KernelCore& kernel, KThread::WaiterList* wl,
bool term)
: KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
: KThreadQueue(kernel), m_wait_list(wl), m_allow_terminating_thread(term) {}

void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Only process waits if we're allowed to.
@@ -39,14 +39,15 @@ private:

void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
// Create thread queue.
KThread* owner = GetCurrentThreadPointer(kernel);
KThread* owner = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};

ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
ThreadQueueImplForKLightConditionVariable wait_queue(m_kernel, std::addressof(m_wait_list),
allow_terminating_thread);

// Sleep the thread.
{
KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
KScopedSchedulerLockAndSleep lk(m_kernel, std::addressof(timer), owner, timeout);

if (!allow_terminating_thread && owner->IsTerminationRequested()) {
lk.CancelSleep();
@@ -56,9 +57,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
lock->Unlock();

// Add the thread to the queue.
wait_list.push_back(*owner);
m_wait_list.push_back(*owner);

// Begin waiting.
wait_queue.SetHardwareTimer(timer);
owner->BeginWait(std::addressof(wait_queue));
}

@@ -67,10 +69,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
}

void KLightConditionVariable::Broadcast() {
KScopedSchedulerLock lk(kernel);
KScopedSchedulerLock lk(m_kernel);

// Signal all threads.
for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
for (auto it = m_wait_list.begin(); it != m_wait_list.end(); it = m_wait_list.erase(it)) {
it->EndWait(ResultSuccess);
}
}

@@ -13,13 +13,13 @@ class KLightLock;

class KLightConditionVariable {
public:
explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}
explicit KLightConditionVariable(KernelCore& kernel) : m_kernel{kernel} {}

void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
void Broadcast();

private:
KernelCore& kernel;
KThread::WaiterList wait_list{};
KernelCore& m_kernel;
KThread::WaiterList m_wait_list{};
};
} // namespace Kernel
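Note: KLightConditionVariable::Broadcast above wakes every waiter with the erase-while-iterating idiom, advancing via the iterator that erase() returns. A self-contained standard-library analogue of the same loop shape (the std::list and the printed messages are only for illustration; the kernel's m_wait_list is an intrusive list of threads):

    #include <cstdio>
    #include <list>

    int main() {
        std::list<int> wait_list{1, 2, 3};

        // erase() returns the next valid iterator, so the loop stays well-defined
        // while every element is removed and "woken".
        for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
            std::printf("waking waiter %d\n", *it);
        }

        std::printf("remaining waiters: %zu\n", wait_list.size());
    }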
@@ -13,7 +13,7 @@ namespace {

class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
explicit ThreadQueueImplForKLightLock(KernelCore& kernel) : KThreadQueue(kernel) {}

void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
@@ -29,13 +29,13 @@ public:
} // namespace

void KLightLock::Lock() {
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));

while (true) {
uintptr_t old_tag = tag.load(std::memory_order_relaxed);
uintptr_t old_tag = m_tag.load(std::memory_order_relaxed);

while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
std::memory_order_acquire)) {
while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
std::memory_order_acquire)) {
}

if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
@@ -45,30 +45,30 @@ void KLightLock::Lock() {
}

void KLightLock::Unlock() {
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));

uintptr_t expected = cur_thread;
if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
this->UnlockSlowPath(cur_thread);
}
}

bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
ThreadQueueImplForKLightLock wait_queue(kernel);
ThreadQueueImplForKLightLock wait_queue(m_kernel);

// Pend the current thread waiting on the owner thread.
{
KScopedSchedulerLock sl{kernel};
KScopedSchedulerLock sl{m_kernel};

// Ensure we actually have locking to do.
if (tag.load(std::memory_order_relaxed) != _owner) {
if (m_tag.load(std::memory_order_relaxed) != _owner) {
return false;
}

// Add the current thread as a waiter on the owner.
KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
owner_thread->AddWaiter(cur_thread);

// Begin waiting to hold the lock.
@@ -87,12 +87,12 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {

// Unlock.
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Get the next owner.
bool has_waiters;
KThread* next_owner = owner_thread->RemoveKernelWaiterByKey(
std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag)));

// Pass the lock to the next owner.
uintptr_t next_tag = 0;
@@ -114,12 +114,13 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
}

// Write the new tag value.
tag.store(next_tag, std::memory_order_release);
m_tag.store(next_tag, std::memory_order_release);
}
}

bool KLightLock::IsLockedByCurrentThread() const {
return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
return (m_tag.load() | 1ULL) ==
(reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel)) | 1ULL);
}

} // namespace Kernel
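Note: the Lock/Unlock code above encodes the lock state in a single atomic word: 0 means unlocked, the owner thread's address means locked and uncontended, and (owner | 1) means locked with waiters. The following is a minimal standalone sketch of just that encoding; the real KLightLock parks waiters via LockSlowPath/UnlockSlowPath and hands ownership to the next waiter, whereas this sketch merely spins, so treat it as an illustration of the CAS shape, not a drop-in lock.

    #include <atomic>
    #include <cstdint>

    class TagLockSketch {
    public:
        void Lock(uintptr_t cur_thread) {
            while (true) {
                uintptr_t old_tag = m_tag.load(std::memory_order_relaxed);
                const uintptr_t desired = (old_tag == 0) ? cur_thread : (old_tag | 1);
                if (!m_tag.compare_exchange_weak(old_tag, desired, std::memory_order_acquire)) {
                    continue; // lost the race; re-read the tag and try again
                }
                if (old_tag == 0) {
                    return; // took the lock uncontended
                }
                // Contended: we set the "has waiters" bit; wait for the owner to release.
                while (m_tag.load(std::memory_order_relaxed) != 0) {
                }
            }
        }

        void Unlock(uintptr_t cur_thread) {
            uintptr_t expected = cur_thread;
            if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
                // Waiters set the low bit; the real code chooses the next owner here.
                m_tag.store(0, std::memory_order_release);
            }
        }

    private:
        std::atomic<uintptr_t> m_tag{};
    };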
@@ -13,7 +13,7 @@ class KernelCore;

class KLightLock {
public:
explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}
explicit KLightLock(KernelCore& kernel) : m_kernel{kernel} {}

void Lock();

@@ -24,14 +24,14 @@ public:
void UnlockSlowPath(uintptr_t cur_thread);

bool IsLocked() const {
return tag != 0;
return m_tag.load() != 0;
}

bool IsLockedByCurrentThread() const;

private:
std::atomic<uintptr_t> tag{};
KernelCore& kernel;
std::atomic<uintptr_t> m_tag{};
KernelCore& m_kernel;
};

using KScopedLightLock = KScopedLock<KLightLock>;
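Note: the KScopedLightLock alias above is the RAII pairing for KLightLock. The real KScopedLock template is defined elsewhere in the kernel, so its exact shape here is an assumption; the sketch below only shows the idiom it stands for: lock on construction, unlock on destruction.

    template <typename T>
    class ScopedLockSketch {
    public:
        explicit ScopedLockSketch(T& lock) : m_lock(lock) {
            m_lock.Lock();
        }
        ~ScopedLockSketch() {
            m_lock.Unlock();
        }

        ScopedLockSketch(const ScopedLockSketch&) = delete;
        ScopedLockSketch& operator=(const ScopedLockSketch&) = delete;

    private:
        T& m_lock;
    };

Used as ScopedLockSketch lk{some_light_lock}; the lock is released when lk leaves scope, the same idiom the KScopedSchedulerLock call sites in the hunks above rely on.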
@@ -1,238 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <boost/intrusive/list.hpp>

#include "common/assert.h"
#include "core/hle/kernel/slab_helpers.h"

namespace Kernel {

class KernelCore;

class KLinkedListNode : public boost::intrusive::list_base_hook<>,
public KSlabAllocated<KLinkedListNode> {

public:
explicit KLinkedListNode(KernelCore&) {}
KLinkedListNode() = default;

void Initialize(void* it) {
m_item = it;
}

void* GetItem() const {
return m_item;
}

private:
void* m_item = nullptr;
};

template <typename T>
class KLinkedList : private boost::intrusive::list<KLinkedListNode> {
private:
using BaseList = boost::intrusive::list<KLinkedListNode>;

public:
template <bool Const>
class Iterator;

using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;

template <bool Const>
class Iterator {
private:
using BaseIterator = BaseList::iterator;
friend class KLinkedList;

public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename KLinkedList::value_type;
using difference_type = typename KLinkedList::difference_type;
using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
using reference =
std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;

public:
explicit Iterator(BaseIterator it) : m_base_it(it) {}

pointer GetItem() const {
return static_cast<pointer>(m_base_it->GetItem());
}

bool operator==(const Iterator& rhs) const {
return m_base_it == rhs.m_base_it;
}

bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}

pointer operator->() const {
return this->GetItem();
}

reference operator*() const {
return *this->GetItem();
}

Iterator& operator++() {
++m_base_it;
return *this;
}

Iterator& operator--() {
--m_base_it;
return *this;
}

Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}

Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}

operator Iterator<true>() const {
return Iterator<true>(m_base_it);
}

private:
BaseIterator m_base_it;
};

public:
constexpr KLinkedList(KernelCore& kernel_) : BaseList(), kernel{kernel_} {}

~KLinkedList() {
// Erase all elements.
for (auto it = begin(); it != end(); it = erase(it)) {
}

// Ensure we succeeded.
ASSERT(this->empty());
}

// Iterator accessors.
iterator begin() {
return iterator(BaseList::begin());
}

const_iterator begin() const {
return const_iterator(BaseList::begin());
}

iterator end() {
return iterator(BaseList::end());
}

const_iterator end() const {
return const_iterator(BaseList::end());
}

const_iterator cbegin() const {
return this->begin();
}

const_iterator cend() const {
return this->end();
}

reverse_iterator rbegin() {
return reverse_iterator(this->end());
}

const_reverse_iterator rbegin() const {
return const_reverse_iterator(this->end());
}

reverse_iterator rend() {
return reverse_iterator(this->begin());
}

const_reverse_iterator rend() const {
return const_reverse_iterator(this->begin());
}

const_reverse_iterator crbegin() const {
return this->rbegin();
}

const_reverse_iterator crend() const {
return this->rend();
}

// Content management.
using BaseList::empty;
using BaseList::size;

reference back() {
return *(--this->end());
}

const_reference back() const {
return *(--this->end());
}

reference front() {
return *this->begin();
}

const_reference front() const {
return *this->begin();
}

iterator insert(const_iterator pos, reference ref) {
KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel);
ASSERT(new_node != nullptr);
new_node->Initialize(std::addressof(ref));
return iterator(BaseList::insert(pos.m_base_it, *new_node));
}

void push_back(reference ref) {
this->insert(this->end(), ref);
}

void push_front(reference ref) {
this->insert(this->begin(), ref);
}

void pop_back() {
this->erase(--this->end());
}

void pop_front() {
this->erase(this->begin());
}

iterator erase(const iterator pos) {
KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
iterator ret = iterator(BaseList::erase(pos.m_base_it));
KLinkedListNode::Free(kernel, freed_node);

return ret;
}

private:
KernelCore& kernel;
};

} // namespace Kernel
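Note: the file deleted above (k_linked_list.h) was a slab-allocated wrapper around boost::intrusive::list. For context, a self-contained demo of the underlying boost::intrusive machinery it wrapped: elements derive from list_base_hook<>, the container never owns them, and linked nodes must be unhooked before their storage goes away. The Node struct and values below are made up for the example.

    #include <boost/intrusive/list.hpp>
    #include <cstdio>

    struct Node : public boost::intrusive::list_base_hook<> {
        explicit Node(int v) : value(v) {}
        int value;
    };

    int main() {
        Node a{1}, b{2}, c{3};

        boost::intrusive::list<Node> list;
        list.push_back(a); // links the existing object; no allocation happens
        list.push_back(b);
        list.push_back(c);

        for (const Node& n : list) {
            std::printf("%d\n", n.value);
        }

        list.clear(); // unhook the nodes before they go out of scope
    }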
@@ -471,8 +471,8 @@ public:
m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
}

constexpr void UpdateDeviceDisableMergeStateForShareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
constexpr void UpdateDeviceDisableMergeStateForShareLeft(KMemoryPermission new_perm, bool left,
bool right) {
// New permission/right aren't used.
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
@@ -482,8 +482,8 @@ public:
}
}

constexpr void UpdateDeviceDisableMergeStateForShareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
constexpr void UpdateDeviceDisableMergeStateForShareRight(KMemoryPermission new_perm, bool left,
bool right) {
// New permission/left aren't used.
if (right) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
@@ -499,8 +499,7 @@ public:
this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
}

constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
constexpr void ShareToDevice(KMemoryPermission new_perm, bool left, bool right) {
// New permission isn't used.

// We must either be shared or have a zero lock count.
@@ -516,8 +515,8 @@ public:
this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
}

constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(KMemoryPermission new_perm,
bool left, bool right) {
// New permission/right aren't used.

if (left) {
@@ -536,8 +535,8 @@ public:
}
}

constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
constexpr void UpdateDeviceDisableMergeStateForUnshareRight(KMemoryPermission new_perm,
bool left, bool right) {
// New permission/left aren't used.

if (right) {
@@ -556,8 +555,7 @@ public:
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}

constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
constexpr void UnshareToDevice(KMemoryPermission new_perm, bool left, bool right) {
// New permission isn't used.

// We must be shared.
@@ -575,8 +573,7 @@ public:
this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
}

constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
constexpr void UnshareToDeviceRight(KMemoryPermission new_perm, bool left, bool right) {
// New permission isn't used.

// We must be shared.
@@ -594,7 +591,7 @@ public:
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}

constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
constexpr void LockForIpc(KMemoryPermission new_perm, bool left, bool right) {
// We must either be locked or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
m_ipc_lock_count == 0);
@@ -626,8 +623,7 @@ public:
}
}

constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
[[maybe_unused]] bool right) {
constexpr void UnlockForIpc(KMemoryPermission new_perm, bool left, bool right) {
// New permission isn't used.

// We must be locked.
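Note: the KMemoryBlock hunks above drop [[maybe_unused]] and keep every parameter named, so all the Share/Unshare helpers share one (new_perm, left, right) signature and document ignored arguments in a comment instead. A small, entirely hypothetical illustration of that pattern (the enum and flag values are not the project's real KMemoryBlockDisableMergeAttribute definitions):

    #include <cstdint>

    enum class MergeFlag : uint8_t {
        None = 0,
        Left = 1 << 0,
        Right = 1 << 1,
    };

    constexpr MergeFlag UpdateForShare(int new_perm, bool left, bool right, MergeFlag current) {
        // new_perm isn't used; it is kept so every helper takes the same arguments.
        uint8_t bits = static_cast<uint8_t>(current);
        if (left) {
            bits |= static_cast<uint8_t>(MergeFlag::Left);
        }
        if (right) {
            bits |= static_cast<uint8_t>(MergeFlag::Right);
        }
        return static_cast<MergeFlag>(bits);
    }

    static_assert(UpdateForShare(0, true, false, MergeFlag::None) == MergeFlag::Left);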
@@ -18,11 +18,11 @@ KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, A

} // namespace

KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_)
: memory_region_allocator{memory_region_allocator_} {}
KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator)
: m_memory_region_allocator{memory_region_allocator} {}

void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id));
this->insert(*AllocateRegion(m_memory_region_allocator, address, last_address, attr, type_id));
}

bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
@@ -69,7 +69,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
? old_pair + (address - old_address)
: old_pair;
this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last,
this->insert(*AllocateRegion(m_memory_region_allocator, address, inserted_region_last,
new_pair, new_attr, type_id));
}

@@ -78,7 +78,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
? old_pair + (inserted_region_end - old_address)
: old_pair;
this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last,
this->insert(*AllocateRegion(m_memory_region_allocator, inserted_region_end, old_last,
after_pair, old_attr, old_type));
}

@@ -126,14 +126,15 @@ VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u
}

KMemoryLayout::KMemoryLayout()
: virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator},
virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {}
: m_virtual_tree{m_memory_region_allocator}, m_physical_tree{m_memory_region_allocator},
m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{
m_memory_region_allocator} {}

void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
VAddr linear_virtual_start) {
// Set static differences.
linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
m_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
m_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;

// Initialize linear trees.
for (auto& region : GetPhysicalMemoryRegionTree()) {

@@ -80,35 +80,35 @@ public:
KMemoryLayout();

KMemoryRegionTree& GetVirtualMemoryRegionTree() {
return virtual_tree;
return m_virtual_tree;
}
const KMemoryRegionTree& GetVirtualMemoryRegionTree() const {
return virtual_tree;
return m_virtual_tree;
}
KMemoryRegionTree& GetPhysicalMemoryRegionTree() {
return physical_tree;
return m_physical_tree;
}
const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const {
return physical_tree;
return m_physical_tree;
}
KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() {
return virtual_linear_tree;
return m_virtual_linear_tree;
}
const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const {
return virtual_linear_tree;
return m_virtual_linear_tree;
}
KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() {
return physical_linear_tree;
return m_physical_linear_tree;
}
const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const {
return physical_linear_tree;
return m_physical_linear_tree;
}

VAddr GetLinearVirtualAddress(PAddr address) const {
return address + linear_phys_to_virt_diff;
return address + m_linear_phys_to_virt_diff;
}
PAddr GetLinearPhysicalAddress(VAddr address) const {
return address + linear_virt_to_phys_diff;
return address + m_linear_virt_to_phys_diff;
}

const KMemoryRegion* FindVirtual(VAddr address) const {
@@ -391,13 +391,13 @@ private:
}

private:
u64 linear_phys_to_virt_diff{};
u64 linear_virt_to_phys_diff{};
KMemoryRegionAllocator memory_region_allocator;
KMemoryRegionTree virtual_tree;
KMemoryRegionTree physical_tree;
KMemoryRegionTree virtual_linear_tree;
KMemoryRegionTree physical_linear_tree;
u64 m_linear_phys_to_virt_diff{};
u64 m_linear_virt_to_phys_diff{};
KMemoryRegionAllocator m_memory_region_allocator;
KMemoryRegionTree m_virtual_tree;
KMemoryRegionTree m_physical_tree;
KMemoryRegionTree m_virtual_linear_tree;
KMemoryRegionTree m_physical_linear_tree;
};

namespace Init {
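Note: InitializeLinearMemoryRegionTrees above stores two differences, and GetLinearVirtualAddress/GetLinearPhysicalAddress convert by adding the relevant one; with unsigned wraparound the two additions invert each other. A standalone illustration of that arithmetic (the concrete addresses are made up for the example):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t aligned_linear_phys_start = 0x8000'0000;      // hypothetical
        const uint64_t linear_virtual_start = 0xffff'8000'0000'0000; // hypothetical

        const uint64_t phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
        const uint64_t virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;

        const uint64_t phys = 0x8123'4000;
        const uint64_t virt = phys + phys_to_virt_diff; // GetLinearVirtualAddress
        const uint64_t back = virt + virt_to_phys_diff; // GetLinearPhysicalAddress

        // back equals phys again, since the two differences cancel modulo 2^64.
        std::printf("virt=0x%llx back=0x%llx\n",
                    static_cast<unsigned long long>(virt),
                    static_cast<unsigned long long>(back));
    }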
Some files were not shown because too many files have changed in this diff.