Compare commits
119 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 2cdfbbc07d | |
| | cdb9fe978f | |
| | 2dd6a2352d | |
| | 041eb5bf57 | |
| | 8b4d5aeb4f | |
| | d8e3380ea5 | |
| | e59bd6c335 | |
| | 77803d96be | |
| | fa9b7db76f | |
| | fa913a702f | |
| | 3c38bd7cf0 | |
| | 1a378a7769 | |
| | cbb6c24215 | |
| | 2e782a154d | |
| | 0313ee7793 | |
| | 05f2673648 | |
| | 2d90a927c9 | |
| | 2ccbf5abdd | |
| | 120cd450e5 | |
| | f51c71e956 | |
| | bb31b0f261 | |
| | f86774c1ac | |
| | 42c4ef7373 | |
| | c7e079a5d4 | |
| | 6908ea2284 | |
| | 347432524c | |
| | b02c3f2314 | |
| | 3822e31323 | |
| | cae108404a | |
| | bad3025951 | |
| | f3c40f4a20 | |
| | e6ab1f673b | |
| | 93297d14d8 | |
| | 91c410c918 | |
| | 496695618a | |
| | 0860fffd78 | |
| | 2f90694797 | |
| | 3e0aaeba98 | |
| | 82fdfb33ac | |
| | 1f54cd4ac7 | |
| | efaedcab31 | |
| | 49682a0481 | |
| | aa9e9052a6 | |
| | 93a7058d8e | |
| | f16db300c6 | |
| | 969387a79a | |
| | 3968faec06 | |
| | 7f66050f0c | |
| | 0b181eeef4 | |
| | 6b71530fa8 | |
| | a6628e8dba | |
| | 9e16837088 | |
| | c0b1bdd237 | |
| | d4c0b7b437 | |
| | 7daf751b8d | |
| | fca195b4fb | |
| | 3efb8eb2dc | |
| | 5ffb8b8039 | |
| | 925fb63478 | |
| | 560bca57a2 | |
| | 97879faea4 | |
| | 7bd3930939 | |
| | b8a70c9999 | |
| | 3cb4498142 | |
| | a264b54022 | |
| | 638fa6170a | |
| | 11f85ea713 | |
| | 829e82e264 | |
| | a4d11f4427 | |
| | 1b787adbd0 | |
| | abcc009dff | |
| | 79bcb38321 | |
| | 8d4e026d05 | |
| | ff26190d42 | |
| | d00245d444 | |
| | 1baedfa12c | |
| | ed591934fb | |
| | 58eb6953d1 | |
| | 2bb41cffca | |
| | 57a77e9ff4 | |
| | d02ccfb15d | |
| | 9ec5f75f43 | |
| | 345b9e6a08 | |
| | 25dcaf1eca | |
| | 113a5ed68f | |
| | 47b8160666 | |
| | cb073f95dc | |
| | e63a5459e3 | |
| | 6e1c6297a3 | |
| | b6119a55f9 | |
| | 0cfd90004b | |
| | 2cc9d94060 | |
| | 0101ef9fb1 | |
| | 9393f90ccf | |
| | 5000d814af | |
| | 8649c46c74 | |
| | 1deb997eba | |
| | 282cd3e5fe | |
| | 23b6569fc2 | |
| | 99507d0188 | |
| | 88ccdaf10a | |
| | bffbaddb79 | |
| | c75a4bdeaa | |
| | 2f37c7948f | |
| | f107e58fde | |
| | c70e1d0247 | |
| | ae453ab6a8 | |
| | 20139f8a55 | |
| | 4b773b15a6 | |
| | 9fe077635e | |
| | 5c7eef3756 | |
| | ddf5577799 | |
| | f706b3bd24 | |
| | d574bb4610 | |
| | b0ba1a0b65 | |
| | 0ba03d1b3a | |
| | fcebd36cde | |
| | 40af1111c2 | |
| | d4cb0eac87 | |
.ci/scripts/transifex/docker.sh

@@ -3,15 +3,6 @@
 # SPDX-FileCopyrightText: 2021 yuzu Emulator Project
 # SPDX-License-Identifier: GPL-2.0-or-later

-# Setup RC file for tx
-cat << EOF > ~/.transifexrc
-[https://www.transifex.com]
-hostname = https://www.transifex.com
-username = api
-password = $TRANSIFEX_API_TOKEN
-EOF
-
-
 set -x

 echo -e "\e[1m\e[33mBuild tools information:\e[0m"
@@ -19,9 +10,6 @@ cmake --version
 gcc -v
 tx --version

-# vcpkg needs these: curl zip unzip tar, have tar
-apt-get install -y curl zip unzip
-
 mkdir build && cd build
 cmake .. -DENABLE_QT_TRANSLATION=ON -DGENERATE_QT_TRANSLATION=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_SDL2=OFF -DYUZU_TESTS=OFF -DYUZU_USE_BUNDLED_VCPKG=ON
 make translation
4 .github/workflows/ci.yml vendored

@@ -19,11 +19,11 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
-          fetch-depth: 0
+          fetch-depth: 0
       - name: Update Translation
         run: ./.ci/scripts/transifex/docker.sh
         env:
-          TRANSIFEX_API_TOKEN: ${{ secrets.TRANSIFEX_API_TOKEN }}
+          TX_TOKEN: ${{ secrets.TRANSIFEX_API_TOKEN }}

  reuse:
    runs-on: ubuntu-latest
@@ -541,9 +541,9 @@ add_definitions(-DBOOST_ERROR_CODE_HEADER_ONLY
 # Adjustments for MSVC + Ninja
 if (MSVC AND CMAKE_GENERATOR STREQUAL "Ninja")
     add_compile_options(
-        /wd4711 # function 'function' selected for automatic inline expansion
         /wd4464 # relative include path contains '..'
-        /wd4820 # 'identifier1': '4' bytes padding added after data member 'identifier2'
+        /wd4711 # function 'function' selected for automatic inline expansion
+        /wd4820 # 'bytes' bytes padding added after construct 'member_name'
     )
 endif()
2 dist/languages/.tx/config vendored

@@ -1,7 +1,7 @@
 [main]
 host = https://www.transifex.com

-[yuzu.emulator]
+[o:yuzu-emulator:p:yuzu:r:emulator]
 file_filter = <lang>.ts
 source_file = en.ts
 source_lang = en
4 dist/languages/README.md vendored

@@ -1 +1,3 @@
-This directory stores translation patches (TS files) for yuzu Qt frontend. This directory is linked with [yuzu project on transifex](https://www.transifex.com/yuzu-emulator/yuzu), so you can update the translation by executing `tx pull -a`. If you want to contribute to the translation, please go the transifex link and submit your translation there. This directory on the main repo will be synchronized with transifex periodically. Do not directly open PRs on github to modify the translation.
+This directory stores translation patches (TS files) for yuzu Qt frontend. This directory is linked with [yuzu project on transifex](https://www.transifex.com/yuzu-emulator/yuzu), so you can update the translation by executing `tx pull -t -a`. If you want to contribute to the translation, please go the transifex link and submit your translation there. This directory on the main repo will be synchronized with transifex periodically.
+
+Do not directly open PRs on github to modify the translation.
@@ -58,13 +58,11 @@ if (MSVC)

        # Warnings
        /W3
        /we4018 # 'expression': signed/unsigned mismatch
        /WX

        /we4062 # Enumerator 'identifier' in a switch of enum 'enumeration' is not handled
        /we4101 # 'identifier': unreferenced local variable
        /we4189 # 'identifier': local variable is initialized but not referenced
        /we4265 # 'class': class has virtual functions, but destructor is not virtual
        /we4267 # 'var': conversion from 'size_t' to 'type', possible loss of data
        /we4305 # 'context': truncation from 'type1' to 'type2'
        /we4388 # 'expression': signed/unsigned mismatch
        /we4389 # 'operator': signed/unsigned mismatch
        /we4456 # Declaration of 'identifier' hides previous local declaration
@@ -75,10 +73,13 @@ if (MSVC)
         /we4547 # 'operator': operator before comma has no effect; expected operator with side-effect
         /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
         /we4555 # Expression has no effect; expected expression with side-effect
+        /we4715 # 'function': not all control paths return a value
         /we4834 # Discarding return value of function with 'nodiscard' attribute
         /we4826 # Conversion from 'type1' to 'type2' is sign-extended. This may cause unexpected runtime behavior.
         /we5038 # data member 'member1' will be initialized after data member 'member2'
+        /we5233 # explicit lambda capture 'identifier' is not used
+        /we5245 # 'function': unreferenced function with internal linkage has been removed

         /wd4100 # 'identifier': unreferenced formal parameter
         /wd4324 # 'struct_name': structure was padded due to __declspec(align())
     )
@@ -99,24 +100,18 @@ if (MSVC)
     set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
 else()
     add_compile_options(
         -Wall
-        -Werror=array-bounds
-        -Werror=implicit-fallthrough
+        -Werror=all
+        -Werror=extra
         -Werror=missing-declarations
-        -Werror=missing-field-initializers
-        -Werror=reorder
         -Werror=shadow
-        -Werror=sign-compare
-        -Werror=switch
-        -Werror=uninitialized
-        -Werror=unused-function
-        -Werror=unused-result
-        -Werror=unused-variable
-        -Wextra
-        -Wmissing-declarations
+        -Werror=unused

         -Wno-attributes
         -Wno-invalid-offsetof
         -Wno-unused-parameter

         $<$<CXX_COMPILER_ID:Clang>:-Wno-braced-scalar-init>
         $<$<CXX_COMPILER_ID:Clang>:-Wno-unused-private-field>
     )

 if (ARCHITECTURE_x86_64)
@@ -206,20 +206,11 @@ if (MSVC)
         /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
         /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
         /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
-        /we4456 # Declaration of 'identifier' hides previous local declaration
-        /we4457 # Declaration of 'identifier' hides function parameter
-        /we4458 # Declaration of 'identifier' hides class member
-        /we4459 # Declaration of 'identifier' hides global declaration
         /we4800 # Implicit conversion from 'type' to bool. Possible information loss
     )
 else()
     target_compile_options(audio_core PRIVATE
         -Werror=conversion
         -Werror=ignored-qualifiers
-        -Werror=shadow
-        -Werror=unused-variable
-
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
         -Wno-sign-conversion
     )
@@ -91,7 +91,7 @@ Result InfoUpdater::UpdateVoices(VoiceContext& voice_context,
         voice_info.Initialize();

         for (u32 channel = 0; channel < in_param.channel_count; channel++) {
-            std::memset(voice_states[channel], 0, sizeof(VoiceState));
+            *voice_states[channel] = {};
         }
     }
@@ -94,7 +94,7 @@ void BiquadFilterCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor
 void BiquadFilterCommand::Process(const ADSP::CommandListProcessor& processor) {
     auto state_{reinterpret_cast<VoiceState::BiquadFilterState*>(state)};
     if (needs_init) {
-        std::memset(state_, 0, sizeof(VoiceState::BiquadFilterState));
+        *state_ = {};
     }

     auto input_buffer{
@@ -30,7 +30,7 @@ void MultiTapBiquadFilterCommand::Process(const ADSP::CommandListProcessor& proc
     for (u32 i = 0; i < filter_tap_count; i++) {
         auto state{reinterpret_cast<VoiceState::BiquadFilterState*>(states[i])};
         if (needs_init[i]) {
-            std::memset(state, 0, sizeof(VoiceState::BiquadFilterState));
+            *state = {};
        }

         ApplyBiquadFilterFloat(output_buffer, input_buffer, biquads[i].b, biquads[i].a, *state,
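All three hunks above make the same substitution: a byte-wise `std::memset` over a struct becomes value-initialization through assignment from `{}`. A minimal standalone sketch of why the latter is preferable (hypothetical `State` struct, not the emulator's actual types):

```cpp
#include <cstring>

// Hypothetical audio-state struct; stands in for VoiceState in the hunks above.
struct State {
    int sample_count;
    float gain;
};

int main() {
    State a{1, 2.0f};
    State b{1, 2.0f};

    std::memset(&a, 0, sizeof(State)); // byte-wise zeroing; silently breaks if State gains non-trivial members
    b = {};                            // value-initialization: zeroes every member and stays correct as the type evolves

    return a.sample_count + b.sample_count; // 0
}
```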
@@ -98,9 +98,8 @@ System::System(Core::System& core_, Kernel::KEvent* adsp_rendered_event_)
     : core{core_}, adsp{core.AudioCore().GetADSP()}, adsp_rendered_event{adsp_rendered_event_} {}

 Result System::Initialize(const AudioRendererParameterInternal& params,
-                          Kernel::KTransferMemory* transfer_memory, const u64 transfer_memory_size,
-                          const u32 process_handle_, const u64 applet_resource_user_id_,
-                          const s32 session_id_) {
+                          Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
+                          u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) {
     if (!CheckValidRevision(params.revision)) {
         return Service::Audio::ERR_INVALID_REVISION;
     }
@@ -354,6 +353,8 @@ Result System::Initialize(const AudioRendererParameterInternal& params,

     render_time_limit_percent = 100;
     drop_voice = params.voice_drop_enabled && params.execution_mode == ExecutionMode::Auto;
+    drop_voice_param = 1.0f;
+    num_voices_dropped = 0;

     allocator.Align(0x40);
     command_workbuffer_size = allocator.GetRemainingSize();
@@ -547,7 +548,7 @@ u32 System::GetRenderingTimeLimit() const {
     return render_time_limit_percent;
 }

-void System::SetRenderingTimeLimit(const u32 limit) {
+void System::SetRenderingTimeLimit(u32 limit) {
     render_time_limit_percent = limit;
 }
@@ -635,7 +636,7 @@ void System::SendCommandToDsp() {
 }

 u64 System::GenerateCommand(std::span<u8> in_command_buffer,
-                            [[maybe_unused]] const u64 command_buffer_size_) {
+                            [[maybe_unused]] u64 command_buffer_size_) {
     PoolMapper::ClearUseState(memory_pool_workbuffer, memory_pool_count);
     const auto start_time{core.CoreTiming().GetClockTicks()};
@@ -693,7 +694,8 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,

     voice_context.SortInfo();

-    const auto start_estimated_time{command_buffer.estimated_process_time};
+    const auto start_estimated_time{drop_voice_param *
+                                    static_cast<f32>(command_buffer.estimated_process_time)};

     command_generator.GenerateVoiceCommands();
     command_generator.GenerateSubMixCommands();
@@ -712,11 +714,16 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
             render_context.behavior->IsAudioRendererProcessingTimeLimit70PercentSupported();
         time_limit_percent = 70.0f;
     }
+
+    const auto end_estimated_time{drop_voice_param *
+                                  static_cast<f32>(command_buffer.estimated_process_time)};
+    const auto estimated_time{start_estimated_time - end_estimated_time};
+
     const auto time_limit{static_cast<u32>(
-        static_cast<f32>(start_estimated_time - command_buffer.estimated_process_time) +
-        (((time_limit_percent / 100.0f) * 2'880'000.0) *
-         (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
-    num_voices_dropped = DropVoices(command_buffer, start_estimated_time, time_limit);
+        estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0) *
+                          (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
+    num_voices_dropped =
+        DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit);
     }

     command_list_header->buffer_size = command_buffer.size;
@@ -737,24 +744,33 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
     return command_buffer.size;
 }

-u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_process_time,
-                       const u32 time_limit) {
+f32 System::GetVoiceDropParameter() const {
+    return drop_voice_param;
+}
+
+void System::SetVoiceDropParameter(f32 voice_drop_) {
+    drop_voice_param = voice_drop_;
+}
+
+u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit) {
     u32 i{0};
     auto command_list{command_buffer.command_list.data() + sizeof(CommandListHeader)};
-    ICommand* cmd{};
+    ICommand* cmd{nullptr};

-    for (; i < command_buffer.count; i++) {
+    // Find a first valid voice to drop
+    while (i < command_buffer.count) {
         cmd = reinterpret_cast<ICommand*>(command_list);
-        if (cmd->type != CommandId::Performance &&
-            cmd->type != CommandId::DataSourcePcmInt16Version1 &&
-            cmd->type != CommandId::DataSourcePcmInt16Version2 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion1 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion2 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion1 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion2) {
+        if (cmd->type == CommandId::Performance ||
+            cmd->type == CommandId::DataSourcePcmInt16Version1 ||
+            cmd->type == CommandId::DataSourcePcmInt16Version2 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion1 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion2 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion1 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion2) {
             break;
         }
         command_list += cmd->size;
+        i++;
     }

     if (cmd == nullptr || command_buffer.count == 0 || i >= command_buffer.count) {
@@ -767,6 +783,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         const auto node_id_type{cmd->node_id >> 28};
         const auto node_id_base{cmd->node_id & 0xFFF};

+        // If the new estimated process time falls below the limit, we're done dropping.
         if (estimated_process_time <= time_limit) {
             break;
         }
@@ -775,6 +792,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
             break;
         }

+        // Don't drop voices marked with the highest priority.
         auto& voice_info{voice_context.GetInfo(node_id_base)};
         if (voice_info.priority == HighestVoicePriority) {
             break;
@@ -783,18 +801,23 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         voices_dropped++;
         voice_info.voice_dropped = true;

-        if (i < command_buffer.count) {
-            while (cmd->node_id == node_id) {
-                if (cmd->type == CommandId::DepopPrepare) {
-                    cmd->enabled = true;
-                } else if (cmd->type == CommandId::Performance || !cmd->enabled) {
-                    cmd->enabled = false;
-                }
-                i++;
-                command_list += cmd->size;
-                cmd = reinterpret_cast<ICommand*>(command_list);
-            }
-            i++;
-        }
+        // First iteration should drop the voice, and then iterate through all of the commands tied
+        // to the voice. We don't need reverb on a voice which we've just removed, for example.
+        // Depops can't be removed otherwise we'll introduce audio popping, and we don't
+        // remove perf commands. Lower the estimated time for each command dropped.
+        while (i < command_buffer.count && cmd->node_id == node_id) {
+            if (cmd->type == CommandId::DepopPrepare) {
+                cmd->enabled = true;
+            } else if (cmd->enabled && cmd->type != CommandId::Performance) {
+                cmd->enabled = false;
+                estimated_process_time -= static_cast<u32>(
+                    drop_voice_param * static_cast<f32>(cmd->estimated_process_time));
+            }
+            command_list += cmd->size;
+            cmd = reinterpret_cast<ICommand*>(command_list);
+            i++;
+        }
     }
     return voices_dropped;
 }
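The core of the rework: `DropVoices` now subtracts a `drop_voice_param`-scaled estimate per disabled command until the running total fits the time limit. A simplified, self-contained sketch of that budget loop (hypothetical `Command` type; not the renderer's actual command-list layout):

```cpp
#include <cstdint>
#include <vector>

// Hypothetical command entry; stands in for the renderer's ICommand records.
struct Command {
    uint32_t estimated_process_time;
    bool enabled{true};
};

// Disable commands until the scaled running estimate fits the time budget.
// Mirrors the shape of the DropVoices budget loop in the hunk above.
uint32_t DropUntilWithinBudget(std::vector<Command>& commands, float drop_param,
                               uint32_t estimated_total, uint32_t time_limit) {
    uint32_t dropped = 0;
    for (auto& cmd : commands) {
        if (estimated_total <= time_limit) {
            break; // within budget, stop dropping
        }
        cmd.enabled = false;
        estimated_total -=
            static_cast<uint32_t>(drop_param * static_cast<float>(cmd.estimated_process_time));
        ++dropped;
    }
    return dropped;
}

int main() {
    std::vector<Command> cmds{{500}, {400}, {300}};
    return static_cast<int>(DropUntilWithinBudget(cmds, 1.0f, 1200, 600)); // drops 2
}
```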
@@ -196,6 +196,20 @@ public:
     */
    u32 DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit);

+    /**
+     * Get the current voice drop parameter.
+     *
+     * @return The current voice drop.
+     */
+    f32 GetVoiceDropParameter() const;
+
+    /**
+     * Set the voice drop parameter.
+     *
+     * @param voice_drop - The new voice drop.
+     */
+    void SetVoiceDropParameter(f32 voice_drop);
+
 private:
     /// Core system
     Core::System& core;
@@ -301,6 +315,8 @@ private:
     u32 num_voices_dropped{};
     /// Tick that rendering started
     u64 render_start_tick{};
+    /// Parameter to control the threshold for dropping voices if the audio graph gets too large
+    f32 drop_voice_param{1.0f};
 };

 } // namespace AudioRenderer
@@ -74,8 +74,8 @@ void VoiceContext::SortInfo() {
     }

     std::ranges::sort(sorted_voice_info, [](const VoiceInfo* a, const VoiceInfo* b) {
-        return a->priority != b->priority ? a->priority < b->priority
-                                          : a->sort_order < b->sort_order;
+        return a->priority != b->priority ? a->priority > b->priority
+                                          : a->sort_order > b->sort_order;
     });
 }
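Flipping `<` to `>` in the comparator reverses the ordering, so higher-priority voices sort to the front. A standalone illustration of the same comparator shape (hypothetical `Voice` struct):

```cpp
#include <algorithm>
#include <vector>

struct Voice {
    int priority;
    int sort_order;
};

int main() {
    std::vector<Voice> voices{{1, 2}, {3, 0}, {3, 1}};

    // Descending by priority, then descending by sort_order:
    // the same '>' comparator pattern as the diff above.
    std::ranges::sort(voices, [](const Voice& a, const Voice& b) {
        return a.priority != b.priority ? a.priority > b.priority : a.sort_order > b.sort_order;
    });

    return voices.front().priority; // 3
}
```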
@@ -230,7 +230,9 @@ std::vector<std::string> ListSDLSinkDevices(bool capture) {

     const int device_count = SDL_GetNumAudioDevices(capture);
     for (int i = 0; i < device_count; ++i) {
-        device_list.emplace_back(SDL_GetAudioDeviceName(i, 0));
+        if (const char* name = SDL_GetAudioDeviceName(i, capture)) {
+            device_list.emplace_back(name);
+        }
     }

     return device_list;
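The old call hard-coded `0` for the capture flag and fed the result straight into `emplace_back`; `SDL_GetAudioDeviceName` can return null, and constructing a `std::string` from a null pointer is undefined behavior. A sketch of the guarded pattern using a stand-in API (not SDL itself):

```cpp
#include <string>
#include <vector>

// Stand-in for a C API that may return null (as SDL_GetAudioDeviceName can).
const char* GetDeviceName(int index, int /*capture*/) {
    static const char* names[] = {"Speakers", nullptr, "Headset"};
    return (index >= 0 && index < 3) ? names[index] : nullptr;
}

std::vector<std::string> ListDevices(int device_count, int capture) {
    std::vector<std::string> device_list;
    for (int i = 0; i < device_count; ++i) {
        // Only keep names the API actually returned; skip null entries.
        if (const char* name = GetDeviceName(i, capture)) {
            device_list.emplace_back(name);
        }
    }
    return device_list;
}

int main() {
    return static_cast<int>(ListDevices(3, 0).size()); // 2: the null entry is skipped
}
```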
@@ -156,12 +156,13 @@ if (MSVC)
     )
     target_compile_options(common PRIVATE
        /W4
        /WX

        /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data
        /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
        /we4800 # Implicit conversion from 'type' to bool. Possible information loss
     )
 else()
     target_compile_options(common PRIVATE
        -Werror

        $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
     )
 endif()
@@ -169,7 +170,11 @@ endif()
 create_target_directory_groups(common)

 target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
-target_link_libraries(common PRIVATE lz4::lz4)
+if (TARGET lz4::lz4)
+    target_link_libraries(common PRIVATE lz4::lz4)
+else()
+    target_link_libraries(common PRIVATE LZ4::lz4_shared)
+endif()
 if (TARGET zstd::zstd)
     target_link_libraries(common PRIVATE zstd::zstd)
 else()
@@ -141,10 +141,6 @@ public:
     constexpr BitField(BitField&&) noexcept = default;
     constexpr BitField& operator=(BitField&&) noexcept = default;

-    [[nodiscard]] constexpr operator T() const {
-        return Value();
-    }
-
     constexpr void Assign(const T& value) {
 #ifdef _MSC_VER
         storage = static_cast<StorageType>((storage & ~mask) | FormatValue(value));
@@ -162,6 +158,17 @@ public:
         return ExtractValue(storage);
     }

+    template <typename ConvertedToType>
+    [[nodiscard]] constexpr ConvertedToType As() const {
+        static_assert(!std::is_same_v<T, ConvertedToType>,
+                      "Unnecessary cast. Use Value() instead.");
+        return static_cast<ConvertedToType>(Value());
+    }
+
+    [[nodiscard]] constexpr operator T() const {
+        return Value();
+    }
+
     [[nodiscard]] constexpr explicit operator bool() const {
         return Value() != 0;
     }
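The new `As<T>()` member reads the raw field value and casts it to a different type in one step, with a `static_assert` rejecting pointless same-type casts. A hedged, standalone sketch of the pattern (hypothetical `Field` struct, not yuzu's full `BitField`):

```cpp
#include <cstdint>

enum class Mode : uint32_t { A = 0, B = 1 };

// Hypothetical one-bit field illustrating the As<T>() accessor pattern.
struct Field {
    uint32_t storage;

    constexpr uint32_t Value() const {
        return storage & 0x1; // extract the low bit
    }

    template <typename To>
    constexpr To As() const {
        return static_cast<To>(Value()); // read, then convert in one step
    }
};

int main() {
    constexpr Field f{1};
    static_assert(f.As<Mode>() == Mode::B); // casts the extracted value to the enum
    return 0;
}
```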
@@ -21,11 +21,6 @@ constexpr size_t hardware_interference_size = std::hardware_destructive_interfer
 constexpr size_t hardware_interference_size = 64;
 #endif

-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4324)
-#endif
-
 template <typename T, size_t capacity = 0x400>
 class MPSCQueue {
 public:
@@ -160,8 +155,4 @@ private:
     static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
 };

-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
-
 } // namespace Common
@@ -3,24 +3,14 @@

 #pragma once

+#include <iterator>
 #include <type_traits>

 namespace Common {

-// Check if type is like an STL container
+// Check if type satisfies the ContiguousContainer named requirement.
 template <typename T>
-concept IsSTLContainer = requires(T t) {
-    typename T::value_type;
-    typename T::iterator;
-    typename T::const_iterator;
-    // TODO(ogniK): Replace below is std::same_as<void> when MSVC supports it.
-    t.begin();
-    t.end();
-    t.cbegin();
-    t.cend();
-    t.data();
-    t.size();
-};
+concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;

 // TODO: Replace with std::derived_from when the <concepts> header
 // is available on all supported platforms.
@@ -34,4 +24,12 @@ concept DerivedFrom = requires {
 template <typename From, typename To>
 concept ConvertibleTo = std::is_convertible_v<From, To>;

+// No equivalents in the stdlib
+
+template <typename T>
+concept IsArithmetic = std::is_arithmetic_v<T>;
+
+template <typename T>
+concept IsIntegral = std::is_integral_v<T>;
+
 } // namespace Common
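The hand-rolled `requires` block is replaced by a single constraint on the container's iterator category. A quick, self-contained check of what the new concept accepts and rejects (same concept shape as the diff above):

```cpp
#include <array>
#include <iterator>
#include <list>
#include <vector>

// Same shape as the concept introduced in the diff above.
template <typename T>
concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;

static_assert(IsContiguousContainer<std::vector<int>>);
static_assert(IsContiguousContainer<std::array<int, 4>>);
static_assert(!IsContiguousContainer<std::list<int>>); // linked-list iterators are not contiguous

int main() {
    return 0;
}
```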
@@ -4,14 +4,7 @@
 // From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
 // See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math

-#ifndef FIXED_H_
-#define FIXED_H_
-
-#if __cplusplus >= 201402L
-#define CONSTEXPR14 constexpr
-#else
-#define CONSTEXPR14
-#endif
+#pragma once

 #include <cstddef> // for size_t
 #include <cstdint>
@@ -19,6 +12,8 @@
 #include <ostream>
 #include <type_traits>

+#include <common/concepts.h>
+
 namespace Common {

 template <size_t I, size_t F>
@@ -57,8 +52,8 @@ struct type_from_size<64> {
     static constexpr size_t size = 64;

     using value_type = int64_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<128>;
 };
@@ -68,8 +63,8 @@ struct type_from_size<32> {
     static constexpr size_t size = 32;

     using value_type = int32_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<64>;
 };
@@ -79,8 +74,8 @@ struct type_from_size<16> {
     static constexpr size_t size = 16;

     using value_type = int16_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<32>;
 };
@@ -90,8 +85,8 @@ struct type_from_size<8> {
     static constexpr size_t size = 8;

     using value_type = int8_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<16>;
 };
@@ -106,9 +101,9 @@ constexpr B next_to_base(N rhs) {
 struct divide_by_zero : std::exception {};

 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {

     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -126,9 +121,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 }

 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {

     using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
@@ -196,9 +191,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(

 // this is the usual implementation of multiplication
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {

     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -215,9 +210,9 @@ CONSTEXPR14 FixedPoint<I, F> multiply(
 // it is slightly slower, but is more robust since it doesn't
 // require and upgraded type
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {

     using base_type = typename FixedPoint<I, F>::base_type;
@@ -272,19 +267,20 @@ public:
     static constexpr base_type one = base_type(1) << fractional_bits;

 public: // constructors
-    FixedPoint() = default;
-    FixedPoint(const FixedPoint&) = default;
-    FixedPoint(FixedPoint&&) = default;
-    FixedPoint& operator=(const FixedPoint&) = default;
+    constexpr FixedPoint() = default;

-    template <class Number>
-    constexpr FixedPoint(
-        Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
-        : data_(static_cast<base_type>(n * one)) {}
+    constexpr FixedPoint(const FixedPoint&) = default;
+    constexpr FixedPoint& operator=(const FixedPoint&) = default;
+
+    constexpr FixedPoint(FixedPoint&&) noexcept = default;
+    constexpr FixedPoint& operator=(FixedPoint&&) noexcept = default;
+
+    template <IsArithmetic Number>
+    constexpr FixedPoint(Number n) : data_(static_cast<base_type>(n * one)) {}

 public: // conversion
     template <size_t I2, size_t F2>
-    CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
+    constexpr explicit FixedPoint(FixedPoint<I2, F2> other) {
         static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
         using T = FixedPoint<I2, F2>;
@@ -308,36 +304,14 @@ public:
     }

 public: // comparison operators
-    constexpr bool operator==(FixedPoint rhs) const {
-        return data_ == rhs.data_;
-    }
-
-    constexpr bool operator!=(FixedPoint rhs) const {
-        return data_ != rhs.data_;
-    }
-
-    constexpr bool operator<(FixedPoint rhs) const {
-        return data_ < rhs.data_;
-    }
-
-    constexpr bool operator>(FixedPoint rhs) const {
-        return data_ > rhs.data_;
-    }
-
-    constexpr bool operator<=(FixedPoint rhs) const {
-        return data_ <= rhs.data_;
-    }
-
-    constexpr bool operator>=(FixedPoint rhs) const {
-        return data_ >= rhs.data_;
-    }
+    friend constexpr auto operator<=>(FixedPoint lhs, FixedPoint rhs) = default;

 public: // unary operators
-    constexpr bool operator!() const {
+    [[nodiscard]] constexpr bool operator!() const {
         return !data_;
     }

-    constexpr FixedPoint operator~() const {
+    [[nodiscard]] constexpr FixedPoint operator~() const {
         // NOTE(eteran): this will often appear to "just negate" the value
         // that is not an error, it is because -x == (~x+1)
         // and that "+1" is adding an infinitesimally small fraction to the
@@ -345,89 +319,87 @@ public: // unary operators
         return FixedPoint::from_base(~data_);
     }

-    constexpr FixedPoint operator-() const {
+    [[nodiscard]] constexpr FixedPoint operator-() const {
         return FixedPoint::from_base(-data_);
     }

-    constexpr FixedPoint operator+() const {
+    [[nodiscard]] constexpr FixedPoint operator+() const {
         return FixedPoint::from_base(+data_);
     }

-    CONSTEXPR14 FixedPoint& operator++() {
+    constexpr FixedPoint& operator++() {
         data_ += one;
         return *this;
     }

-    CONSTEXPR14 FixedPoint& operator--() {
+    constexpr FixedPoint& operator--() {
         data_ -= one;
         return *this;
     }

-    CONSTEXPR14 FixedPoint operator++(int) {
+    constexpr FixedPoint operator++(int) {
         FixedPoint tmp(*this);
         data_ += one;
         return tmp;
     }

-    CONSTEXPR14 FixedPoint operator--(int) {
+    constexpr FixedPoint operator--(int) {
         FixedPoint tmp(*this);
         data_ -= one;
         return tmp;
     }

 public: // basic math operators
-    CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
+    constexpr FixedPoint& operator+=(FixedPoint n) {
         data_ += n.data_;
         return *this;
     }

-    CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
+    constexpr FixedPoint& operator-=(FixedPoint n) {
         data_ -= n.data_;
         return *this;
     }

-    CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
+    constexpr FixedPoint& operator*=(FixedPoint n) {
         return assign(detail::multiply(*this, n));
     }

-    CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
+    constexpr FixedPoint& operator/=(FixedPoint n) {
         FixedPoint temp;
         return assign(detail::divide(*this, n, temp));
     }

 private:
-    CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
+    constexpr FixedPoint& assign(FixedPoint rhs) {
         data_ = rhs.data_;
         return *this;
     }

 public: // binary math operators, effects underlying bit pattern since these
         // don't really typically make sense for non-integer values
-    CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
+    constexpr FixedPoint& operator&=(FixedPoint n) {
         data_ &= n.data_;
         return *this;
     }

-    CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
+    constexpr FixedPoint& operator|=(FixedPoint n) {
         data_ |= n.data_;
         return *this;
     }

-    CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
+    constexpr FixedPoint& operator^=(FixedPoint n) {
         data_ ^= n.data_;
         return *this;
     }

-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator>>=(Integer n) {
         data_ >>= n;
         return *this;
     }

-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator<<=(Integer n) {
         data_ <<= n;
         return *this;
     }
@@ -437,42 +409,42 @@ public: // conversion to basic types
         data_ += (data_ & fractional_mask) >> 1;
     }

-    constexpr int to_int() {
+    [[nodiscard]] constexpr int to_int() {
         round_up();
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }

-    constexpr unsigned int to_uint() const {
+    [[nodiscard]] constexpr unsigned int to_uint() {
         round_up();
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }

-    constexpr int64_t to_long() {
+    [[nodiscard]] constexpr int64_t to_long() {
         round_up();
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }

-    constexpr int to_int_floor() const {
+    [[nodiscard]] constexpr int to_int_floor() const {
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }

-    constexpr int64_t to_long_floor() {
+    [[nodiscard]] constexpr int64_t to_long_floor() const {
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }

-    constexpr unsigned int to_uint_floor() const {
+    [[nodiscard]] constexpr unsigned int to_uint_floor() const {
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }

-    constexpr float to_float() const {
+    [[nodiscard]] constexpr float to_float() const {
         return static_cast<float>(data_) / FixedPoint::one;
     }

-    constexpr double to_double() const {
+    [[nodiscard]] constexpr double to_double() const {
         return static_cast<double>(data_) / FixedPoint::one;
     }

-    constexpr base_type to_raw() const {
+    [[nodiscard]] constexpr base_type to_raw() const {
         return data_;
     }
@@ -480,27 +452,27 @@ public: // conversion to basic types
         data_ &= fractional_mask;
     }

-    constexpr base_type get_frac() const {
+    [[nodiscard]] constexpr base_type get_frac() const {
         return data_ & fractional_mask;
     }

 public:
-    CONSTEXPR14 void swap(FixedPoint& rhs) {
+    constexpr void swap(FixedPoint& rhs) noexcept {
         using std::swap;
         swap(data_, rhs.data_);
     }

 public:
-    base_type data_;
+    base_type data_{};
 };

 // if we have the same fractional portion, but differing integer portions, we trivially upgrade the
 // smaller type
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator+(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {

-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;

     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -508,10 +480,10 @@ operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }

 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator-(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {

-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;

     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -519,10 +491,10 @@ operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }

 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator*(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {

-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;

     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -530,10 +502,10 @@ operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }

 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator/(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {

-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;

     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -548,159 +520,133 @@ std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {

 // basic math operators
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs += rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs -= rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs *= rhs;
     return lhs;
 }
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
     lhs /= rhs;
     return lhs;
 }

-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
     lhs += FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
     lhs -= FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
     lhs *= FixedPoint<I, F>(rhs);
     return lhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
     lhs /= FixedPoint<I, F>(rhs);
     return lhs;
 }

-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp += rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp -= rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp *= rhs;
     return tmp;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
+template <size_t I, size_t F, IsArithmetic Number>
+constexpr FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
     FixedPoint<I, F> tmp(lhs);
     tmp /= rhs;
     return tmp;
 }

 // shift operators
-template <size_t I, size_t F, class Integer,
-          class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
+template <size_t I, size_t F, IsIntegral Integer>
+constexpr FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
     lhs <<= rhs;
     return lhs;
 }
-template <size_t I, size_t F, class Integer,
-          class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
+template <size_t I, size_t F, IsIntegral Integer>
+constexpr FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
     lhs >>= rhs;
     return lhs;
 }

 // comparison operators
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
     return lhs > FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
     return lhs < FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
     return lhs >= FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
     return lhs <= FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
     return lhs == FixedPoint<I, F>(rhs);
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
     return lhs != FixedPoint<I, F>(rhs);
 }

-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) > rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) < rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) >= rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) <= rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) == rhs;
 }
-template <size_t I, size_t F, class Number,
-          class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+template <size_t I, size_t F, IsArithmetic Number>
 constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
     return FixedPoint<I, F>(lhs) != rhs;
 }

 } // namespace Common
-
-#undef CONSTEXPR14
-
-#endif
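The comparison block above collapses six handwritten operators into a single defaulted `operator<=>`. A standalone sketch of how a defaulted spaceship on the raw representation behaves (hypothetical `Fixed` stand-in, not yuzu's full template):

```cpp
#include <compare>
#include <cstdint>

// Minimal stand-in for the FixedPoint pattern above: compare via the raw representation.
struct Fixed {
    int64_t data_;

    // One defaulted operator<=> replaces ==, !=, <, >, <=, >=;
    // a defaulted <=> also implicitly provides a defaulted ==.
    friend constexpr auto operator<=>(Fixed lhs, Fixed rhs) = default;
};

static_assert(Fixed{1} < Fixed{2});
static_assert(Fixed{3} == Fixed{3});
static_assert(Fixed{4} != Fixed{1});

int main() {
    return 0;
}
```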
@@ -209,8 +209,8 @@ public:

     /**
      * Helper function which deduces the value type of a contiguous STL container used in ReadSpan.
-     * If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls
-     * ReadObject and T must be a trivially copyable object.
+     * If T is not a contiguous container as defined by the concept IsContiguousContainer, this
+     * calls ReadObject and T must be a trivially copyable object.
      *
      * See ReadSpan for more details if T is a contiguous container.
      * See ReadObject for more details if T is a trivially copyable object.
@@ -223,7 +223,7 @@ public:
      */
     template <typename T>
     [[nodiscard]] size_t Read(T& data) const {
-        if constexpr (IsSTLContainer<T>) {
+        if constexpr (IsContiguousContainer<T>) {
             using ContiguousType = typename T::value_type;
             static_assert(std::is_trivially_copyable_v<ContiguousType>,
                           "Data type must be trivially copyable.");
@@ -235,8 +235,8 @@ public:

     /**
      * Helper function which deduces the value type of a contiguous STL container used in WriteSpan.
-     * If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls
-     * WriteObject and T must be a trivially copyable object.
+     * If T is not a contiguous STL container as defined by the concept IsContiguousContainer, this
+     * calls WriteObject and T must be a trivially copyable object.
      *
      * See WriteSpan for more details if T is a contiguous container.
      * See WriteObject for more details if T is a trivially copyable object.
@@ -249,7 +249,7 @@ public:
      */
     template <typename T>
     [[nodiscard]] size_t Write(const T& data) const {
-        if constexpr (IsSTLContainer<T>) {
+        if constexpr (IsContiguousContainer<T>) {
             using ContiguousType = typename T::value_type;
             static_assert(std::is_trivially_copyable_v<ContiguousType>,
                           "Data type must be trivially copyable.");
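`Read`/`Write` dispatch at compile time: contiguous containers take the span-style path, anything else must be a single trivially copyable object. A hedged, self-contained sketch of that dispatch (hypothetical `ReadFrom` helper; yuzu's real implementation goes through ReadSpan/ReadObject):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <type_traits>
#include <vector>

template <typename T>
concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;

// Hypothetical file reader mirroring the Read() dispatch in the hunk above.
template <typename T>
std::size_t ReadFrom(const std::vector<char>& file, T& out) {
    if constexpr (IsContiguousContainer<T>) {
        using Element = typename T::value_type;
        static_assert(std::is_trivially_copyable_v<Element>,
                      "Data type must be trivially copyable.");
        const std::size_t bytes = std::min(file.size(), out.size() * sizeof(Element));
        std::memcpy(out.data(), file.data(), bytes); // span-style bulk copy
        return bytes;
    } else {
        static_assert(std::is_trivially_copyable_v<T>, "Data type must be trivially copyable.");
        const std::size_t bytes = std::min(file.size(), sizeof(T));
        std::memcpy(&out, file.data(), bytes); // single-object copy
        return bytes;
    }
}

int main() {
    const std::vector<char> file{'a', 'b', 'c', 'd'};
    std::vector<char> buffer(4);
    int value{};
    return static_cast<int>(ReadFrom(file, buffer) - ReadFrom(file, value)); // 4 - 4 = 0
}
```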
@@ -100,7 +100,6 @@ enum class CameraError {
 enum class VibrationAmplificationType {
     Linear,
     Exponential,
-    Test,
 };

 // Analog properties for calibration
@@ -325,6 +324,10 @@ public:
         return VibrationError::NotSupported;
     }

+    virtual bool IsVibrationEnabled() {
+        return false;
+    }
+
     virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) {
         return PollingError::NotSupported;
     }
@@ -190,6 +190,9 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_dynamic_page_manager.h
+    hle/kernel/k_dynamic_resource_manager.h
+    hle/kernel/k_dynamic_slab_heap.h
     hle/kernel/k_event.cpp
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
@@ -240,6 +243,8 @@ add_library(core STATIC
     hle/kernel/k_server_session.h
     hle/kernel/k_session.cpp
     hle/kernel/k_session.h
+    hle/kernel/k_session_request.cpp
+    hle/kernel/k_session_request.h
     hle/kernel/k_shared_memory.cpp
     hle/kernel/k_shared_memory.h
     hle/kernel/k_shared_memory_info.h
@@ -769,19 +774,15 @@ if (MSVC)
         /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
         /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
         /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
         /we4800 # Implicit conversion from 'type' to bool. Possible information loss
     )
 else()
     target_compile_options(core PRIVATE
         -Werror=conversion
         -Werror=ignored-qualifiers
-
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
-
-        $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>

         -Wno-sign-conversion
+
+        $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
     )
 endif()
@@ -134,6 +134,14 @@ void ARM_Interface::Run() {
         }
         system.ExitDynarmicProfile();

+        // If the thread is scheduled for termination, exit the thread.
+        if (current_thread->HasDpc()) {
+            if (current_thread->IsTerminationRequested()) {
+                current_thread->Exit();
+                UNREACHABLE();
+            }
+        }
+
         // Notify the debugger and go to sleep if a breakpoint was hit,
         // or if the thread is unable to continue for any reason.
         if (Has(hr, breakpoint) || Has(hr, no_execute)) {
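The added block honors a pending termination request after each slice of guest code runs, before any more work is scheduled. A minimal sketch of that control flow with hypothetical flags (not yuzu's kernel types):

```cpp
#include <atomic>
#include <cstdlib>

// Hypothetical thread-control flags mirroring the termination check added above.
struct Thread {
    std::atomic<bool> has_dpc{false};
    std::atomic<bool> termination_requested{false};

    [[noreturn]] void Exit() {
        std::exit(0);
    }
};

void RunLoopIteration(Thread& t) {
    // ... execute a slice of guest code ...

    // Honor any pending termination request before resuming execution.
    if (t.has_dpc.load()) {
        if (t.termination_requested.load()) {
            t.Exit(); // never returns
        }
    }
}

int main() {
    Thread t;
    RunLoopIteration(t); // no flags set: falls through
    return 0;
}
```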
@@ -133,6 +133,50 @@ struct System::Impl {
        : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
          cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}

    void Initialize(System& system) {
        device_memory = std::make_unique<Core::DeviceMemory>();

        is_multicore = Settings::values.use_multi_core.GetValue();

        core_timing.SetMulticore(is_multicore);
        core_timing.Initialize([&system]() { system.RegisterHostThread(); });

        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
        const auto current_time =
            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
        Settings::values.custom_rtc_differential =
            Settings::values.custom_rtc.value_or(current_time) - current_time;

        // Create a default fs if one doesn't already exist.
        if (virtual_filesystem == nullptr) {
            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
        }
        if (content_provider == nullptr) {
            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
        }

        // Create default implementations of applets if one is not provided.
        applet_manager.SetDefaultAppletsIfMissing();

        is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();

        kernel.SetMulticore(is_multicore);
        cpu_manager.SetMulticore(is_multicore);
        cpu_manager.SetAsyncGpu(is_async_gpu);
    }

    void ReinitializeIfNecessary(System& system) {
        if (is_multicore == Settings::values.use_multi_core.GetValue()) {
            return;
        }

        LOG_DEBUG(Kernel, "Re-initializing");

        is_multicore = Settings::values.use_multi_core.GetValue();

        Initialize(system);
    }

    SystemResultStatus Run() {
        std::unique_lock<std::mutex> lk(suspend_guard);
        status = SystemResultStatus::Success;
@@ -178,37 +222,14 @@ struct System::Impl {
        debugger = std::make_unique<Debugger>(system, port);
    }

    SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
    SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
        LOG_DEBUG(Core, "initialized OK");

        device_memory = std::make_unique<Core::DeviceMemory>();

        is_multicore = Settings::values.use_multi_core.GetValue();
        is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();

        kernel.SetMulticore(is_multicore);
        cpu_manager.SetMulticore(is_multicore);
        cpu_manager.SetAsyncGpu(is_async_gpu);
        core_timing.SetMulticore(is_multicore);
        // Setting changes may require a full system reinitialization (e.g., disabling multicore).
        ReinitializeIfNecessary(system);

        kernel.Initialize();
        cpu_manager.Initialize();
        core_timing.Initialize([&system]() { system.RegisterHostThread(); });

        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
        const auto current_time =
            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
        Settings::values.custom_rtc_differential =
            Settings::values.custom_rtc.value_or(current_time) - current_time;

        // Create a default fs if one doesn't already exist.
        if (virtual_filesystem == nullptr)
            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
        if (content_provider == nullptr)
            content_provider = std::make_unique<FileSys::ContentProviderUnion>();

        /// Create default implementations of applets if one is not provided.
        applet_manager.SetDefaultAppletsIfMissing();

        /// Reset all glue registrations
        arp_manager.ResetAll();
@@ -253,11 +274,11 @@ struct System::Impl {
        return SystemResultStatus::ErrorGetLoader;
    }

    SystemResultStatus init_result{Init(system, emu_window)};
    SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
    if (init_result != SystemResultStatus::Success) {
        LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
                     static_cast<int>(init_result));
        Shutdown();
        ShutdownMainProcess();
        return init_result;
    }

@@ -276,7 +297,7 @@ struct System::Impl {
    const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
    if (load_result != Loader::ResultStatus::Success) {
        LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
        Shutdown();
        ShutdownMainProcess();

        return static_cast<SystemResultStatus>(
            static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -335,7 +356,7 @@ struct System::Impl {
        return status;
    }

    void Shutdown() {
    void ShutdownMainProcess() {
        SetShuttingDown(true);

        // Log last frame performance stats if game was loaded
@@ -363,13 +384,14 @@ struct System::Impl {
        kernel.ShutdownCores();
        cpu_manager.Shutdown();
        debugger.reset();
        services->KillNVNFlinger();
        kernel.CloseServices();
        services.reset();
        service_manager.reset();
        cheat_engine.reset();
        telemetry_session.reset();
        time_manager.Shutdown();
        core_timing.Shutdown();
        core_timing.ClearPendingEvents();
        app_loader.reset();
        audio_core.reset();
        gpu_core.reset();
@@ -377,7 +399,6 @@ struct System::Impl {
        perf_stats.reset();
        kernel.Shutdown();
        memory.Reset();
        applet_manager.ClearAll();

        if (auto room_member = room_network.GetRoomMember().lock()) {
            Network::GameInfo game_info{};
@@ -520,6 +541,10 @@ const CpuManager& System::GetCpuManager() const {
    return impl->cpu_manager;
}

void System::Initialize() {
    impl->Initialize(*this);
}

SystemResultStatus System::Run() {
    return impl->Run();
}
@@ -540,8 +565,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
    impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
}

void System::Shutdown() {
    impl->Shutdown();
void System::ShutdownMainProcess() {
    impl->ShutdownMainProcess();
}

bool System::IsShuttingDown() const {

@@ -142,6 +142,12 @@ public:
    System(System&&) = delete;
    System& operator=(System&&) = delete;

    /**
     * Initializes the system
     * This function will initialize core functionality used for system emulation
     */
    void Initialize();

    /**
     * Run the OS and Application
     * This function will start emulation and run the relevant devices
@@ -166,8 +172,8 @@ public:

    void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);

    /// Shutdown the emulated system.
    void Shutdown();
    /// Shutdown the main emulated process.
    void ShutdownMainProcess();

    /// Check if the core is shutting down.
    [[nodiscard]] bool IsShuttingDown() const;

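Reviewer note: with the rename, Initialize() becomes one-time setup while ShutdownMainProcess() is per-title teardown. A hedged sketch of the intended frontend flow (the Load() call shape is assumed for illustration):

```cpp
Core::System system;
system.Initialize();                 // once per emulator instance

// Boot, run, and stop a title; the System object itself stays alive, so a
// second title can be booted without re-creating it.
system.Load(emu_window, game_path);  // runs SetupForMainProcess() internally
system.Run();
// ...
system.ShutdownMainProcess();        // replaces the old whole-system Shutdown()
```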
@@ -40,7 +40,9 @@ struct CoreTiming::Event {
CoreTiming::CoreTiming()
    : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}

CoreTiming::~CoreTiming() = default;
CoreTiming::~CoreTiming() {
    Reset();
}

void CoreTiming::ThreadEntry(CoreTiming& instance) {
    constexpr char name[] = "HostTiming";
@@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
}

void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
    Reset();
    on_thread_init = std::move(on_thread_init_);
    event_fifo_id = 0;
    shutting_down = false;
@@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
    }
}

void CoreTiming::Shutdown() {
    paused = true;
    shutting_down = true;
    pause_event.Set();
    event.Set();
    if (timer_thread) {
        timer_thread->join();
    }
    ClearPendingEvents();
    timer_thread.reset();
    has_started = false;
void CoreTiming::ClearPendingEvents() {
    event_queue.clear();
}

void CoreTiming::Pause(bool is_paused) {
@@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const {
    return CpuCyclesToClockCycles(ticks);
}

void CoreTiming::ClearPendingEvents() {
    event_queue.clear();
}

void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
    std::scoped_lock lock{basic_lock};

@@ -307,6 +297,18 @@ void CoreTiming::ThreadLoop() {
    }
}

void CoreTiming::Reset() {
    paused = true;
    shutting_down = true;
    pause_event.Set();
    event.Set();
    if (timer_thread) {
        timer_thread->join();
    }
    timer_thread.reset();
    has_started = false;
}

std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
    if (is_multicore) {
        return clock->GetTimeNS();

@@ -61,19 +61,14 @@ public:
    /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
    void Initialize(std::function<void()>&& on_thread_init_);

    /// Tears down all timing related functionality.
    void Shutdown();
    /// Clear all pending events. This should ONLY be done on exit.
    void ClearPendingEvents();

    /// Sets if emulation is multicore or single core, must be set before Initialize
    void SetMulticore(bool is_multicore_) {
        is_multicore = is_multicore_;
    }

    /// Check if it's using host timing.
    bool IsHostTiming() const {
        return is_multicore;
    }

    /// Pauses/Unpauses the execution of the timer thread.
    void Pause(bool is_paused);

@@ -136,12 +131,11 @@ public:
private:
    struct Event;

    /// Clear all pending events. This should ONLY be done on exit.
    void ClearPendingEvents();

    static void ThreadEntry(CoreTiming& instance);
    void ThreadLoop();

    void Reset();

    std::unique_ptr<Common::WallClock> clock;

    s64 global_timer = 0;

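Reviewer note: the teardown logic now lives in Reset(), called from both the destructor and the start of Initialize(). A minimal usage sketch under the new API (callback body elided):

```cpp
Core::Timing::CoreTiming timing;
timing.SetMulticore(true);                       // must precede Initialize()
timing.Initialize([] { /* register host thread */ });
// ... schedule and dispatch events ...
timing.ClearPendingEvents();                     // explicit now; only on exit
// ~CoreTiming() calls Reset(), which joins the timer thread, and Initialize()
// also calls Reset() first, so re-initialization is safe.
```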
@@ -31,12 +31,14 @@ public:
               DramMemoryMap::Base;
    }

    u8* GetPointer(PAddr addr) {
        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
    template <typename T>
    T* GetPointer(PAddr addr) {
        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
    }

    const u8* GetPointer(PAddr addr) const {
        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
    template <typename T>
    const T* GetPointer(PAddr addr) const {
        return reinterpret_cast<const T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
    }

    Common::HostMemory buffer;

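Reviewer note: the template moves the reinterpret_cast inside DeviceMemory. Call-site effect, sketched (the address value is a stand-in):

```cpp
Core::DeviceMemory device_memory;
const PAddr addr = 0; // stand-in for a valid DRAM physical address
u64* word = device_memory.GetPointer<u64>(addr);  // typed access, no caller-side cast
void* raw = device_memory.GetPointer<void>(addr); // byte-oriented uses become explicit
```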
@@ -232,8 +232,8 @@ const std::vector<std::shared_ptr<NCA>>& XCI::GetNCAs() const {

std::shared_ptr<NCA> XCI::GetNCAByType(NCAContentType type) const {
    const auto program_id = secure_partition->GetProgramTitleID();
    const auto iter = std::find_if(
        ncas.begin(), ncas.end(), [this, type, program_id](const std::shared_ptr<NCA>& nca) {
    const auto iter =
        std::find_if(ncas.begin(), ncas.end(), [type, program_id](const std::shared_ptr<NCA>& nca) {
            return nca->GetType() == type && nca->GetTitleId() == program_id;
        });
    return iter == ncas.end() ? nullptr : *iter;

@@ -127,7 +127,7 @@ void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address
}

bool ProgramMetadata::Is64BitProgram() const {
    return npdm_header.has_64_bit_instructions;
    return npdm_header.has_64_bit_instructions.As<bool>();
}

ProgramAddressSpaceType ProgramMetadata::GetAddressSpaceType() const {

@@ -5,6 +5,7 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/uuid.h"
#include "core/core.h"
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs.h"
@@ -59,6 +60,36 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA
           attr.title_id == 0 && attr.save_id == 0);
}

std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id,
                                  u128 user_id) {
    // Only detect nand user saves.
    const auto space_id_path = [space_id]() -> std::string_view {
        switch (space_id) {
        case SaveDataSpaceId::NandUser:
            return "/user/save";
        default:
            return "";
        }
    }();

    if (space_id_path.empty()) {
        return "";
    }

    Common::UUID uuid;
    std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID));

    // Only detect account/device saves from the future location.
    switch (type) {
    case SaveDataType::SaveData:
        return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id);
    case SaveDataType::DeviceSaveData:
        return fmt::format("{}/device/{:016X}/1", space_id_path, title_id);
    default:
        return "";
    }
}

} // Anonymous namespace

std::string SaveDataAttribute::DebugInfo() const {
@@ -82,7 +113,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
    PrintSaveDataAttributeWarnings(meta);

    const auto save_directory =
        GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
        GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);

    auto out = dir->CreateDirectoryRelative(save_directory);

@@ -99,7 +130,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
                                            const SaveDataAttribute& meta) const {

    const auto save_directory =
        GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
        GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);

    auto out = dir->GetDirectoryRelative(save_directory);

@@ -134,9 +165,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
    }
}

std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space,
                                         SaveDataType type, u64 title_id, u128 user_id,
                                         u64 save_id) {
std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
                                         SaveDataSpaceId space, SaveDataType type, u64 title_id,
                                         u128 user_id, u64 save_id) {
    // According to switchbrew, if a save is of type SaveData and the title id field is 0, it should
    // be interpreted as the title id of the current process.
    if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) {
@@ -145,6 +176,17 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s
        }
    }

    // For compat with a future impl.
    if (std::string future_path =
            GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id);
        !future_path.empty()) {
        // Check if this location exists, and prefer it over the old.
        if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) {
            LOG_INFO(Service_FS, "Using save at new location: {}", future_path);
            return future_path;
        }
    }

    std::string out = GetSaveDataSpaceIdPath(space);

    switch (type) {
@@ -167,7 +209,8 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s

SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
                                               u128 user_id) const {
    const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
    const auto path =
        GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
    const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);

    const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME);
@@ -185,7 +228,8 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,

void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
                                        SaveDataSize new_value) const {
    const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
    const auto path =
        GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
    const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);

    const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME);

@@ -95,8 +95,8 @@ public:
    VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;

    static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space);
    static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type,
                                   u64 title_id, u128 user_id, u64 save_id);
    static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space,
                                   SaveDataType type, u64 title_id, u128 user_id, u64 save_id);

    SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const;
    void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,

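Reviewer note: a worked example of the path GetFutureSaveDataPath produces for an account save (all ids hypothetical). The `title_id & ~(0xFFULL)` mask in the hunk strips the program-index byte so related programs of a multi-program title share one save:

```cpp
const Common::UUID uuid{};                   // stand-in user id
const u64 title_id = 0x0100000000010000ULL;  // index byte already masked off
const std::string future_path =
    fmt::format("/user/save/account/{}/{:016X}/1", uuid.RawString(), title_id);
// -> "/user/save/account/<uuid>/0100000000010000/1"; if this directory exists,
// it is preferred over the legacy layout built from GetSaveDataSpaceIdPath().
```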
@@ -970,14 +970,7 @@ bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue v
           Common::Input::VibrationError::None;
}

bool EmulatedController::TestVibration(std::size_t device_index) {
    if (device_index >= output_devices.size()) {
        return false;
    }
    if (!output_devices[device_index]) {
        return false;
    }

bool EmulatedController::IsVibrationEnabled(std::size_t device_index) {
    const auto player_index = NpadIdTypeToIndex(npad_id_type);
    const auto& player = Settings::values.players.GetValue()[player_index];

@@ -985,31 +978,15 @@ bool EmulatedController::TestVibration(std::size_t device_index) {
        return false;
    }

    const Common::Input::VibrationStatus test_vibration = {
        .low_amplitude = 0.001f,
        .low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
        .high_amplitude = 0.001f,
        .high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
        .type = Common::Input::VibrationAmplificationType::Test,
    };
    if (device_index >= output_devices.size()) {
        return false;
    }

    const Common::Input::VibrationStatus zero_vibration = {
        .low_amplitude = DEFAULT_VIBRATION_VALUE.low_amplitude,
        .low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
        .high_amplitude = DEFAULT_VIBRATION_VALUE.high_amplitude,
        .high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
        .type = Common::Input::VibrationAmplificationType::Test,
    };
    if (!output_devices[device_index]) {
        return false;
    }

    // Send a slight vibration to test for rumble support
    output_devices[device_index]->SetVibration(test_vibration);

    // Wait for about 15ms to ensure the controller is ready for the stop command
    std::this_thread::sleep_for(std::chrono::milliseconds(15));

    // Stop any vibration and return the result
    return output_devices[device_index]->SetVibration(zero_vibration) ==
           Common::Input::VibrationError::None;
    return output_devices[device_index]->IsVibrationEnabled();
}

bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) {
@@ -1048,6 +1025,7 @@ bool EmulatedController::HasNfc() const {
    case NpadStyleIndex::JoyconRight:
    case NpadStyleIndex::JoyconDual:
    case NpadStyleIndex::ProController:
    case NpadStyleIndex::Handheld:
        break;
    default:
        return false;
@@ -1158,27 +1136,27 @@ bool EmulatedController::IsControllerSupported(bool use_temporary_value) const {
    const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
    switch (type) {
    case NpadStyleIndex::ProController:
        return supported_style_tag.fullkey;
        return supported_style_tag.fullkey.As<bool>();
    case NpadStyleIndex::Handheld:
        return supported_style_tag.handheld;
        return supported_style_tag.handheld.As<bool>();
    case NpadStyleIndex::JoyconDual:
        return supported_style_tag.joycon_dual;
        return supported_style_tag.joycon_dual.As<bool>();
    case NpadStyleIndex::JoyconLeft:
        return supported_style_tag.joycon_left;
        return supported_style_tag.joycon_left.As<bool>();
    case NpadStyleIndex::JoyconRight:
        return supported_style_tag.joycon_right;
        return supported_style_tag.joycon_right.As<bool>();
    case NpadStyleIndex::GameCube:
        return supported_style_tag.gamecube;
        return supported_style_tag.gamecube.As<bool>();
    case NpadStyleIndex::Pokeball:
        return supported_style_tag.palma;
        return supported_style_tag.palma.As<bool>();
    case NpadStyleIndex::NES:
        return supported_style_tag.lark;
        return supported_style_tag.lark.As<bool>();
    case NpadStyleIndex::SNES:
        return supported_style_tag.lucia;
        return supported_style_tag.lucia.As<bool>();
    case NpadStyleIndex::N64:
        return supported_style_tag.lagoon;
        return supported_style_tag.lagoon.As<bool>();
    case NpadStyleIndex::SegaGenesis:
        return supported_style_tag.lager;
        return supported_style_tag.lager.As<bool>();
    default:
        return false;
    }
@@ -1234,12 +1212,6 @@ bool EmulatedController::IsConnected(bool get_temporary_value) const {
    return is_connected;
}

bool EmulatedController::IsVibrationEnabled() const {
    const auto player_index = NpadIdTypeToIndex(npad_id_type);
    const auto& player = Settings::values.players.GetValue()[player_index];
    return player.vibration_enabled;
}

NpadIdType EmulatedController::GetNpadIdType() const {
    std::scoped_lock lock{mutex};
    return npad_id_type;

@@ -206,9 +206,6 @@ public:
     */
    bool IsConnected(bool get_temporary_value = false) const;

    /// Returns true if vibration is enabled
    bool IsVibrationEnabled() const;

    /// Removes all callbacks created from input devices
    void UnloadInput();

@@ -339,7 +336,7 @@ public:
     * Sends a small vibration to the output device
     * @return true if SetVibration was successful
     */
    bool TestVibration(std::size_t device_index);
    bool IsVibrationEnabled(std::size_t device_index);

    /**
     * Sets the desired data to be polled from a controller

@@ -14,7 +14,7 @@ enum class CameraAmbientNoiseLevel : u32 {
    Low,
    Medium,
    High,
    Unkown3, // This level can't be reached
    Unknown3, // This level can't be reached
};

// This is nn::irsensor::CameraLightTarget
@@ -75,9 +75,9 @@ enum class IrCameraStatus : u32 {
enum class IrCameraInternalStatus : u32 {
    Stopped,
    FirmwareUpdateNeeded,
    Unkown2,
    Unkown3,
    Unkown4,
    Unknown2,
    Unknown3,
    Unknown4,
    FirmwareVersionRequested,
    FirmwareVersionIsInvalid,
    Ready,
@@ -121,20 +121,20 @@ enum class IrSensorFunctionLevel : u8 {

// This is nn::irsensor::MomentProcessorPreprocess
enum class MomentProcessorPreprocess : u32 {
    Unkown0,
    Unkown1,
    Unknown0,
    Unknown1,
};

// This is nn::irsensor::PackedMomentProcessorPreprocess
enum class PackedMomentProcessorPreprocess : u8 {
    Unkown0,
    Unkown1,
    Unknown0,
    Unknown1,
};

// This is nn::irsensor::PointingStatus
enum class PointingStatus : u32 {
    Unkown0,
    Unkown1,
    Unknown0,
    Unknown1,
};

struct IrsRect {

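Reviewer note: the `.As<bool>()` churn above follows from `/we4800` in the CMake hunk at the top of this comparison: implicitly narrowing a BitField value to bool is now a hard error on MSVC. A self-contained sketch (field layout assumed for illustration):

```cpp
#include "common/bit_field.h"
#include "common/common_types.h"

union NpadStyleTagSketch {
    u32 raw{};
    BitField<0, 1, u32> fullkey; // hypothetical bit position
};

bool IsFullKey(NpadStyleTagSketch tag) {
    return tag.fullkey.As<bool>(); // explicit; `return tag.fullkey;` now trips /we4800
}
```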
@@ -86,13 +86,13 @@ public:
        u32 num_domain_objects{};
        const bool always_move_handles{
            (static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
        if (!ctx.Session()->IsDomain() || always_move_handles) {
        if (!ctx.Session()->GetSessionRequestManager()->IsDomain() || always_move_handles) {
            num_handles_to_move = num_objects_to_move;
        } else {
            num_domain_objects = num_objects_to_move;
        }

        if (ctx.Session()->IsDomain()) {
        if (ctx.Session()->GetSessionRequestManager()->IsDomain()) {
            raw_data_size +=
                static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
            ctx.write_size += num_domain_objects;
@@ -125,7 +125,8 @@ public:
        if (!ctx.IsTipc()) {
            AlignWithPadding();

            if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) {
            if (ctx.Session()->GetSessionRequestManager()->IsDomain() &&
                ctx.HasDomainMessageHeader()) {
                IPC::DomainMessageHeader domain_header{};
                domain_header.num_objects = num_domain_objects;
                PushRaw(domain_header);
@@ -145,7 +146,7 @@ public:

    template <class T>
    void PushIpcInterface(std::shared_ptr<T> iface) {
        if (context->Session()->IsDomain()) {
        if (context->Session()->GetSessionRequestManager()->IsDomain()) {
            context->AddDomainObject(std::move(iface));
        } else {
            kernel.CurrentProcess()->GetResourceLimit()->Reserve(
@@ -386,7 +387,7 @@ public:

    template <class T>
    std::weak_ptr<T> PopIpcInterface() {
        ASSERT(context->Session()->IsDomain());
        ASSERT(context->Session()->GetSessionRequestManager()->IsDomain());
        ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
        return context->GetDomainHandler<T>(Pop<u32>() - 1);
    }
@@ -405,7 +406,7 @@ inline s32 RequestParser::Pop() {
}

// Ignore the -Wclass-memaccess warning on memcpy for non-trivially default constructible objects.
#if defined(__GNUC__)
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
@@ -416,7 +417,7 @@ void RequestParser::PopRaw(T& value) {
    std::memcpy(&value, cmdbuf + index, sizeof(T));
    index += (sizeof(T) + 3) / 4; // round up to word length
}
#if defined(__GNUC__)
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic pop
#endif

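Reviewer note on the guard tightening: clang and ICC also define `__GNUC__`, but neither implements `-Wclass-memaccess`, so the old guard made them warn about an unknown warning option inside the pragma. The fixed pattern, shown standalone:

```cpp
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
// ... std::memcpy into a non-trivially default constructible T ...
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic pop
#endif
```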
@@ -49,4 +49,26 @@ bool GlobalSchedulerContext::IsLocked() const {
    return scheduler_lock.IsLockedByCurrentThread();
}

void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
    ASSERT(IsLocked());

    woken_dummy_threads.insert(thread);
}

void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
    ASSERT(IsLocked());

    woken_dummy_threads.erase(thread);
}

void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
    ASSERT(IsLocked());

    for (auto* thread : woken_dummy_threads) {
        thread->DummyThreadEndWait();
    }

    woken_dummy_threads.clear();
}

} // namespace Kernel

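Reviewer note: each of these asserts the scheduler lock, so the intended call pattern looks roughly like this (a sketch; `kernel`, `scheduler_context`, and `host_thread` are stand-ins):

```cpp
{
    Kernel::KScopedSchedulerLock sl{kernel};
    scheduler_context.RegisterDummyThreadForWakeup(host_thread);
    // ... the dummy (host) thread blocks on a guest synchronization object ...
}
// When the scheduler lock is released, WakeupWaitingDummyThreads() runs
// DummyThreadEndWait() on every registered thread and clears the set.
```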
@@ -4,6 +4,7 @@
#pragma once

#include <atomic>
#include <set>
#include <vector>

#include "common/common_types.h"
@@ -58,6 +59,10 @@ public:
    /// Returns true if the global scheduler lock is acquired
    bool IsLocked() const;

    void UnregisterDummyThreadForWakeup(KThread* thread);
    void RegisterDummyThreadForWakeup(KThread* thread);
    void WakeupWaitingDummyThreads();

    [[nodiscard]] LockType& SchedulerLock() {
        return scheduler_lock;
    }
@@ -76,6 +81,9 @@ private:
    KSchedulerPriorityQueue priority_queue;
    LockType scheduler_lock;

    /// Lists dummy threads pending wakeup on lock release
    std::set<KThread*> woken_dummy_threads;

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<KThread*> thread_list;
    std::mutex global_list_guard;

@@ -19,6 +19,7 @@
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/service_thread.h"
#include "core/memory.h"

namespace Kernel {
@@ -56,16 +57,103 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
    }
}

Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session,
                                                  HLERequestContext& context) {
    Result result = ResultSuccess;

    // If the session has been converted to a domain, handle the domain request
    if (this->HasSessionRequestHandler(context)) {
        if (IsDomain() && context.HasDomainMessageHeader()) {
            result = HandleDomainSyncRequest(server_session, context);
            // If there is no domain header, the regular session handler is used
        } else if (this->HasSessionHandler()) {
            // If this manager has an associated HLE handler, forward the request to it.
            result = this->SessionHandler().HandleSyncRequest(*server_session, context);
        }
    } else {
        ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
        IPC::ResponseBuilder rb(context, 2);
        rb.Push(ResultSuccess);
    }

    if (convert_to_domain) {
        ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
        this->ConvertToDomain();
        convert_to_domain = false;
    }

    return result;
}

Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session,
                                                      HLERequestContext& context) {
    if (!context.HasDomainMessageHeader()) {
        return ResultSuccess;
    }

    // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
    context.SetSessionRequestManager(server_session->GetSessionRequestManager());

    // If there is a DomainMessageHeader, then this is CommandType "Request"
    const auto& domain_message_header = context.GetDomainMessageHeader();
    const u32 object_id{domain_message_header.object_id};
    switch (domain_message_header.command) {
    case IPC::DomainMessageHeader::CommandType::SendMessage:
        if (object_id > this->DomainHandlerCount()) {
            LOG_CRITICAL(IPC,
                         "object_id {} is too big! This probably means a recent service call "
                         "needed to return a new interface!",
                         object_id);
            ASSERT(false);
            return ResultSuccess; // Ignore error if asserts are off
        }
        if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) {
            return strong_ptr->HandleSyncRequest(*server_session, context);
        } else {
            ASSERT(false);
            return ResultSuccess;
        }

    case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
        LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);

        this->CloseDomainHandler(object_id - 1);

        IPC::ResponseBuilder rb{context, 2};
        rb.Push(ResultSuccess);
        return ResultSuccess;
    }
    }

    LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
    ASSERT(false);
    return ResultSuccess;
}

Result SessionRequestManager::QueueSyncRequest(KSession* parent,
                                               std::shared_ptr<HLERequestContext>&& context) {
    // Ensure we have a session request handler
    if (this->HasSessionRequestHandler(*context)) {
        if (auto strong_ptr = this->GetServiceThread().lock()) {
            strong_ptr->QueueSyncRequest(*parent, std::move(context));
        } else {
            ASSERT_MSG(false, "strong_ptr is nullptr!");
        }
    } else {
        ASSERT_MSG(false, "handler is invalid!");
    }

    return ResultSuccess;
}

void SessionRequestHandler::ClientConnected(KServerSession* session) {
    session->ClientConnected(shared_from_this());
    session->GetSessionRequestManager()->SetSessionHandler(shared_from_this());

    // Ensure our server session is tracked globally.
    kernel.RegisterServerObject(session);
}

void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
    session->ClientDisconnected();
}
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {}

HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
                                     KServerSession* server_session_, KThread* thread_)
@@ -126,7 +214,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
    // Padding to align to 16 bytes
    rp.AlignWithPadding();

    if (Session()->IsDomain() &&
    if (Session()->GetSessionRequestManager()->IsDomain() &&
        ((command_header->type == IPC::CommandType::Request ||
          command_header->type == IPC::CommandType::RequestWithContext) ||
         !incoming)) {
@@ -135,7 +223,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
        if (incoming || domain_message_header) {
            domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
        } else {
            if (Session()->IsDomain()) {
            if (Session()->GetSessionRequestManager()->IsDomain()) {
                LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
            }
        }
@@ -228,12 +316,12 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
    // Write the domain objects to the command buffer, these go after the raw untranslated data.
    // TODO(Subv): This completely ignores C buffers.

    if (Session()->IsDomain()) {
    if (server_session->GetSessionRequestManager()->IsDomain()) {
        current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
        for (const auto& object : outgoing_domain_objects) {
            server_session->AppendDomainHandler(object);
            cmd_buf[current_offset++] =
                static_cast<u32_le>(server_session->NumDomainRequestHandlers());
        for (auto& object : outgoing_domain_objects) {
            server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object));
            cmd_buf[current_offset++] = static_cast<u32_le>(
                server_session->GetSessionRequestManager()->DomainHandlerCount());
        }
    }

@@ -121,6 +121,10 @@ public:
        is_domain = true;
    }

    void ConvertToDomainOnRequestEnd() {
        convert_to_domain = true;
    }

    std::size_t DomainHandlerCount() const {
        return domain_handlers.size();
    }
@@ -164,7 +168,12 @@ public:

    bool HasSessionRequestHandler(const HLERequestContext& context) const;

    Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
    Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);
    Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context);

private:
    bool convert_to_domain{};
    bool is_domain{};
    SessionRequestHandlerPtr session_handler;
    std::vector<SessionRequestHandlerPtr> domain_handlers;
@@ -295,7 +304,7 @@ public:
     */
    template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
    std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const {
        if constexpr (Common::IsSTLContainer<T>) {
        if constexpr (Common::IsContiguousContainer<T>) {
            using ContiguousType = typename T::value_type;
            static_assert(std::is_trivially_copyable_v<ContiguousType>,
                          "Container to WriteBuffer must contain trivially copyable objects");

@@ -18,6 +18,7 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_session_request.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
@@ -34,6 +35,7 @@ namespace Kernel::Init {
    HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \
    HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
    HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
    HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \
    HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
    HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
    HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
@@ -94,8 +96,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
    // TODO(bunnei): Fix this once we support the kernel virtual memory layout.

    if (size > 0) {
        void* backing_kernel_memory{
            system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
        void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
            TranslateSlabAddrToPhysical(memory_layout, start))};

        const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
        ASSERT(region != nullptr);
@@ -181,7 +183,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
    ASSERT(slab_address != 0);

    // Initialize the slabheap.
    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
                                    slab_size);
}

@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/scope_exit.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
@@ -10,6 +11,8 @@

namespace Kernel {

static constexpr u32 MessageBufferSize = 0x100;

KClientSession::KClientSession(KernelCore& kernel_)
    : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
KClientSession::~KClientSession() = default;
@@ -22,8 +25,16 @@ void KClientSession::Destroy() {
void KClientSession::OnServerClosed() {}

Result KClientSession::SendSyncRequest() {
    // Signal the server session that new data is available
    return parent->GetServerSession().OnRequest();
    // Create a session request.
    KSessionRequest* request = KSessionRequest::Create(kernel);
    R_UNLESS(request != nullptr, ResultOutOfResource);
    SCOPE_EXIT({ request->Close(); });

    // Initialize the request.
    request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);

    // Send the request.
    return parent->GetServerSession().OnRequest(request);
}

} // namespace Kernel

@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si

    // Clear the memory.
    for (const auto& block : m_page_group.Nodes()) {
        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
        std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
    }

    // Set remaining tracking members.

136 src/core/hle/kernel/k_dynamic_page_manager.h (new file)
@@ -0,0 +1,136 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

class KDynamicPageManager {
public:
    class PageBuffer {
    private:
        u8 m_buffer[PageSize];
    };
    static_assert(sizeof(PageBuffer) == PageSize);

public:
    KDynamicPageManager() = default;

    template <typename T>
    T* GetPointer(VAddr addr) {
        return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
    }

    template <typename T>
    const T* GetPointer(VAddr addr) const {
        return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
    }

    Result Initialize(VAddr addr, size_t sz) {
        // We need to have positive size.
        R_UNLESS(sz > 0, ResultOutOfMemory);
        m_backing_memory.resize(sz);

        // Calculate management overhead.
        const size_t management_size =
            KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
        const size_t allocatable_size = sz - management_size;

        // Set tracking fields.
        m_address = addr;
        m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
        m_count = allocatable_size / sizeof(PageBuffer);
        R_UNLESS(m_count > 0, ResultOutOfMemory);

        // Clear the management region.
        u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
        std::memset(management_ptr, 0, management_size);

        // Initialize the bitmap.
        m_page_bitmap.Initialize(management_ptr, m_count);

        // Free the pages to the bitmap.
        for (size_t i = 0; i < m_count; i++) {
            // Ensure the freed page is all-zero.
            std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);

            // Set the bit for the free page.
            m_page_bitmap.SetBit(i);
        }

        R_SUCCEED();
    }

    VAddr GetAddress() const {
        return m_address;
    }
    size_t GetSize() const {
        return m_size;
    }
    size_t GetUsed() const {
        return m_used;
    }
    size_t GetPeak() const {
        return m_peak;
    }
    size_t GetCount() const {
        return m_count;
    }

    PageBuffer* Allocate() {
        // Take the lock.
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        KScopedSpinLock lk(m_lock);

        // Find a random free block.
        s64 soffset = m_page_bitmap.FindFreeBlock(true);
        if (soffset < 0) [[unlikely]] {
            return nullptr;
        }

        const size_t offset = static_cast<size_t>(soffset);

        // Update our tracking.
        m_page_bitmap.ClearBit(offset);
        m_peak = std::max(m_peak, (++m_used));

        return GetPointer<PageBuffer>(m_address) + offset;
    }

    void Free(PageBuffer* pb) {
        // Ensure all pages in the heap are zero.
        std::memset(pb, 0, PageSize);

        // Take the lock.
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        KScopedSpinLock lk(m_lock);

        // Set the bit for the free page.
        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer);
        m_page_bitmap.SetBit(offset);

        // Decrement our used count.
        --m_used;
    }

private:
    KSpinLock m_lock;
    KPageBitmap m_page_bitmap;
    size_t m_used{};
    size_t m_peak{};
    size_t m_count{};
    VAddr m_address{};
    size_t m_size{};

    // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
    std::vector<u8> m_backing_memory;
};

} // namespace Kernel

58 src/core/hle/kernel/k_dynamic_resource_manager.h (new file)
@@ -0,0 +1,58 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/k_memory_block.h"

namespace Kernel {

template <typename T, bool ClearNode = false>
class KDynamicResourceManager {
    YUZU_NON_COPYABLE(KDynamicResourceManager);
    YUZU_NON_MOVEABLE(KDynamicResourceManager);

public:
    using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;

public:
    constexpr KDynamicResourceManager() = default;

    constexpr size_t GetSize() const {
        return m_slab_heap->GetSize();
    }
    constexpr size_t GetUsed() const {
        return m_slab_heap->GetUsed();
    }
    constexpr size_t GetPeak() const {
        return m_slab_heap->GetPeak();
    }
    constexpr size_t GetCount() const {
        return m_slab_heap->GetCount();
    }

    void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
        m_page_allocator = page_allocator;
        m_slab_heap = slab_heap;
    }

    T* Allocate() const {
        return m_slab_heap->Allocate(m_page_allocator);
    }

    void Free(T* t) const {
        m_slab_heap->Free(t);
    }

private:
    KDynamicPageManager* m_page_allocator{};
    DynamicSlabType* m_slab_heap{};
};

class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};

using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;

} // namespace Kernel

122 src/core/hle/kernel/k_dynamic_slab_heap.h (new file)
@@ -0,0 +1,122 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <atomic>

#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_page_manager.h"
#include "core/hle/kernel/k_slab_heap.h"

namespace Kernel {

template <typename T, bool ClearNode = false>
class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
    YUZU_NON_COPYABLE(KDynamicSlabHeap);
    YUZU_NON_MOVEABLE(KDynamicSlabHeap);

public:
    constexpr KDynamicSlabHeap() = default;

    constexpr VAddr GetAddress() const {
        return m_address;
    }
    constexpr size_t GetSize() const {
        return m_size;
    }
    constexpr size_t GetUsed() const {
        return m_used.load();
    }
    constexpr size_t GetPeak() const {
        return m_peak.load();
    }
    constexpr size_t GetCount() const {
        return m_count.load();
    }

    constexpr bool IsInRange(VAddr addr) const {
        return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
    }

    void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
        ASSERT(page_allocator != nullptr);

        // Initialize members.
        m_address = page_allocator->GetAddress();
        m_size = page_allocator->GetSize();

        // Initialize the base allocator.
        KSlabHeapImpl::Initialize();

        // Allocate until we have the correct number of objects.
        while (m_count.load() < num_objects) {
            auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
            ASSERT(allocated != nullptr);

            for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
                KSlabHeapImpl::Free(allocated + i);
            }

            m_count += sizeof(PageBuffer) / sizeof(T);
        }
    }

    T* Allocate(KDynamicPageManager* page_allocator) {
        T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());

        // If we successfully allocated and we should clear the node, do so.
        if constexpr (ClearNode) {
            if (allocated != nullptr) [[likely]] {
                reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
            }
        }

        // If we fail to allocate, try to get a new page from our next allocator.
        if (allocated == nullptr) [[unlikely]] {
            if (page_allocator != nullptr) {
                allocated = reinterpret_cast<T*>(page_allocator->Allocate());
                if (allocated != nullptr) {
                    // If we succeeded in getting a page, free the rest to our slab.
                    for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
                        KSlabHeapImpl::Free(allocated + i);
                    }
                    m_count += sizeof(PageBuffer) / sizeof(T);
                }
            }
        }

        if (allocated != nullptr) [[likely]] {
            // Construct the object.
            std::construct_at(allocated);

            // Update our tracking.
            const size_t used = ++m_used;
            size_t peak = m_peak.load();
            while (peak < used) {
                if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
                    break;
                }
            }
        }

        return allocated;
    }

    void Free(T* t) {
        KSlabHeapImpl::Free(t);
        --m_used;
    }

private:
    using PageBuffer = KDynamicPageManager::PageBuffer;

private:
    std::atomic<size_t> m_used{};
    std::atomic<size_t> m_peak{};
    std::atomic<size_t> m_count{};
    VAddr m_address{};
    size_t m_size{};
};

} // namespace Kernel

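Reviewer note: a hedged sketch of how the three new headers are meant to compose (the base address and sizes are invented; the real call sites land in later commits):

```cpp
using namespace Kernel;

KDynamicPageManager page_manager;
// Back the allocator with 16 pages at an arbitrary base address.
ASSERT(R_SUCCEEDED(page_manager.Initialize(/*addr=*/0x10000000, 16 * PageSize)));

KMemoryBlockSlabHeap block_heap;
block_heap.Initialize(&page_manager, /*num_objects=*/64); // pre-fill from pages

KMemoryBlockSlabManager block_manager;
block_manager.Initialize(&page_manager, &block_heap);

KMemoryBlock* block = block_manager.Allocate(); // refills from page_manager if empty
block_manager.Free(block);
```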
@@ -11,29 +11,34 @@
namespace Kernel::KInterruptManager {

void HandleInterrupt(KernelCore& kernel, s32 core_id) {
    auto* process = kernel.CurrentProcess();
    if (!process) {
        return;
    }

    // Acknowledge the interrupt.
    kernel.PhysicalCore(core_id).ClearInterrupt();

    auto& current_thread = GetCurrentThread(kernel);

    // If the user disable count is set, we may need to pin the current thread.
    if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
        KScopedSchedulerLock sl{kernel};
    if (auto* process = kernel.CurrentProcess(); process) {
        // If the user disable count is set, we may need to pin the current thread.
        if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
            KScopedSchedulerLock sl{kernel};

        // Pin the current thread.
        process->PinCurrentThread(core_id);
            // Pin the current thread.
            process->PinCurrentThread(core_id);

        // Set the interrupt flag for the thread.
        GetCurrentThread(kernel).SetInterruptFlag();
            // Set the interrupt flag for the thread.
            GetCurrentThread(kernel).SetInterruptFlag();
        }
    }

    // Request interrupt scheduling.
    kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}

void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
    for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
        if (core_mask & (1ULL << core_id)) {
            kernel.PhysicalCore(core_id).Interrupt();
        }
    }
}

} // namespace Kernel::KInterruptManager

@@ -11,6 +11,8 @@ class KernelCore;

namespace KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id);
}
void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);

} // namespace KInterruptManager

} // namespace Kernel

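Reviewer note: usage of the new helper is a one-liner; the mask selects target cores by bit index (cores 0 and 2 here are arbitrary):

```cpp
const u64 core_mask = (1ULL << 0) | (1ULL << 2);
Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
```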
@@ -16,6 +16,7 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>,
|
||||
public KSlabAllocated<KLinkedListNode> {
|
||||
|
||||
public:
|
||||
explicit KLinkedListNode(KernelCore&) {}
|
||||
KLinkedListNode() = default;
|
||||
|
||||
void Initialize(void* it) {
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/intrusive_red_black_tree.h"
|
||||
#include "core/hle/kernel/memory_types.h"
|
||||
#include "core/hle/kernel/svc_types.h"
|
||||
|
||||
@@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
|
||||
|
||||
enum class KMemoryAttribute : u8 {
|
||||
None = 0x00,
|
||||
Mask = 0x7F,
|
||||
All = Mask,
|
||||
DontCareMask = 0x80,
|
||||
All = 0xFF,
|
||||
UserMask = All,
|
||||
|
||||
Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
|
||||
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
|
||||
@@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 {
|
||||
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
|
||||
|
||||
SetMask = Uncached,
|
||||
|
||||
IpcAndDeviceMapped = IpcLocked | DeviceShared,
|
||||
LockedAndIpcLocked = Locked | IpcLocked,
|
||||
DeviceSharedAndUncached = DeviceShared | Uncached
|
||||
};
|
||||
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
|
||||
|
||||
static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
|
||||
static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);
|
||||
enum class KMemoryBlockDisableMergeAttribute : u8 {
|
||||
None = 0,
|
||||
Normal = (1u << 0),
|
||||
DeviceLeft = (1u << 1),
|
||||
IpcLeft = (1u << 2),
|
||||
Locked = (1u << 3),
|
||||
DeviceRight = (1u << 4),
|
||||
|
||||
AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
|
||||
AllRight = DeviceRight,
|
||||
};
|
||||
DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
|
||||
|
||||
struct KMemoryInfo {
|
||||
VAddr addr{};
|
||||
std::size_t size{};
|
||||
KMemoryState state{};
|
||||
KMemoryPermission perm{};
|
||||
KMemoryAttribute attribute{};
|
||||
KMemoryPermission original_perm{};
|
||||
u16 ipc_lock_count{};
|
||||
u16 device_use_count{};
|
||||
uintptr_t m_address;
|
||||
size_t m_size;
|
||||
KMemoryState m_state;
|
||||
u16 m_device_disable_merge_left_count;
|
||||
u16 m_device_disable_merge_right_count;
|
||||
u16 m_ipc_lock_count;
|
||||
u16 m_device_use_count;
|
||||
u16 m_ipc_disable_merge_count;
|
||||
KMemoryPermission m_permission;
|
||||
KMemoryAttribute m_attribute;
|
||||
KMemoryPermission m_original_permission;
|
||||
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
|
||||
|
||||
constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
|
||||
return {
|
||||
addr,
|
||||
size,
|
||||
static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
|
||||
static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
|
||||
static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
|
||||
ipc_lock_count,
|
||||
device_use_count,
|
||||
.addr = m_address,
|
||||
.size = m_size,
|
||||
.state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
|
||||
.attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
|
||||
.perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
|
||||
.ipc_refcount = m_ipc_lock_count,
|
||||
.device_refcount = m_device_use_count,
|
||||
.padding = {},
|
||||
};
|
||||
}
|
||||
|
||||
constexpr VAddr GetAddress() const {
|
||||
return addr;
|
||||
constexpr uintptr_t GetAddress() const {
|
||||
return m_address;
|
||||
}
|
||||
constexpr std::size_t GetSize() const {
|
||||
return size;
|
||||
|
||||
constexpr size_t GetSize() const {
|
||||
return m_size;
|
||||
}
|
||||
constexpr std::size_t GetNumPages() const {
|
||||
return GetSize() / PageSize;
|
||||
|
||||
constexpr size_t GetNumPages() const {
|
||||
return this->GetSize() / PageSize;
|
||||
}
|
||||
constexpr VAddr GetEndAddress() const {
|
||||
return GetAddress() + GetSize();
|
||||
|
||||
constexpr uintptr_t GetEndAddress() const {
|
||||
return this->GetAddress() + this->GetSize();
|
||||
}
|
||||
constexpr VAddr GetLastAddress() const {
|
||||
return GetEndAddress() - 1;
|
||||
|
||||
constexpr uintptr_t GetLastAddress() const {
|
||||
return this->GetEndAddress() - 1;
|
||||
}
|
||||
|
||||
constexpr u16 GetIpcLockCount() const {
|
||||
return m_ipc_lock_count;
|
||||
}
|
||||
|
||||
constexpr u16 GetIpcDisableMergeCount() const {
|
||||
return m_ipc_disable_merge_count;
|
||||
}
|
||||
|
||||
constexpr KMemoryState GetState() const {
|
||||
return state;
|
||||
}
|
||||
constexpr KMemoryAttribute GetAttribute() const {
|
||||
return attribute;
|
||||
return m_state;
|
||||
}
|
||||
|
||||
constexpr KMemoryPermission GetPermission() const {
|
||||
return perm;
|
||||
return m_permission;
|
||||
}
|
||||
|
||||
constexpr KMemoryPermission GetOriginalPermission() const {
|
||||
return m_original_permission;
|
||||
}
|
||||
|
||||
constexpr KMemoryAttribute GetAttribute() const {
|
||||
return m_attribute;
|
||||
}
|
||||
|
||||
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
|
||||
return m_disable_merge_attribute;
|
||||
}
|
||||
};

class KMemoryBlock final {
    friend class KMemoryBlockManager;

class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
private:
    VAddr addr{};
    std::size_t num_pages{};
    KMemoryState state{KMemoryState::None};
    u16 ipc_lock_count{};
    u16 device_use_count{};
    KMemoryPermission perm{KMemoryPermission::None};
    KMemoryPermission original_perm{KMemoryPermission::None};
    KMemoryAttribute attribute{KMemoryAttribute::None};
    u16 m_device_disable_merge_left_count;
    u16 m_device_disable_merge_right_count;
    VAddr m_address;
    size_t m_num_pages;
    KMemoryState m_memory_state;
    u16 m_ipc_lock_count;
    u16 m_device_use_count;
    u16 m_ipc_disable_merge_count;
    KMemoryPermission m_permission;
    KMemoryPermission m_original_permission;
    KMemoryAttribute m_attribute;
    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;

public:
    static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -261,113 +297,349 @@ public:
    }

public:
    constexpr KMemoryBlock() = default;
    constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
                           KMemoryPermission perm_, KMemoryAttribute attribute_)
        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}

    constexpr VAddr GetAddress() const {
        return addr;
        return m_address;
    }

    constexpr std::size_t GetNumPages() const {
        return num_pages;
    constexpr size_t GetNumPages() const {
        return m_num_pages;
    }

    constexpr std::size_t GetSize() const {
        return GetNumPages() * PageSize;
    constexpr size_t GetSize() const {
        return this->GetNumPages() * PageSize;
    }

    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
        return this->GetAddress() + this->GetSize();
    }

    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;
        return this->GetEndAddress() - 1;
    }

    constexpr u16 GetIpcLockCount() const {
        return m_ipc_lock_count;
    }

    constexpr u16 GetIpcDisableMergeCount() const {
        return m_ipc_disable_merge_count;
    }

    constexpr KMemoryPermission GetPermission() const {
        return m_permission;
    }

    constexpr KMemoryPermission GetOriginalPermission() const {
        return m_original_permission;
    }

    constexpr KMemoryAttribute GetAttribute() const {
        return m_attribute;
    }

    constexpr KMemoryInfo GetMemoryInfo() const {
        return {
            GetAddress(), GetSize(), state, perm,
            attribute, original_perm, ipc_lock_count, device_use_count,
            .m_address = this->GetAddress(),
            .m_size = this->GetSize(),
            .m_state = m_memory_state,
            .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
            .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
            .m_ipc_lock_count = m_ipc_lock_count,
            .m_device_use_count = m_device_use_count,
            .m_ipc_disable_merge_count = m_ipc_disable_merge_count,
            .m_permission = m_permission,
            .m_attribute = m_attribute,
            .m_original_permission = m_original_permission,
            .m_disable_merge_attribute = m_disable_merge_attribute,
        };
    }

    void ShareToDevice(KMemoryPermission /*new_perm*/) {
        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
               device_use_count == 0);
        attribute |= KMemoryAttribute::DeviceShared;
        const u16 new_use_count{++device_use_count};
        ASSERT(new_use_count > 0);
public:
    explicit KMemoryBlock() = default;

    constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
                           KMemoryAttribute attr)
        : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
          m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
          m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
          m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
          m_original_permission(KMemoryPermission::None), m_attribute(attr),
          m_disable_merge_attribute() {}

    constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
                              KMemoryAttribute attr) {
        m_device_disable_merge_left_count = 0;
        m_device_disable_merge_right_count = 0;
        m_address = addr;
        m_num_pages = np;
        m_memory_state = ms;
        m_ipc_lock_count = 0;
        m_device_use_count = 0;
        m_permission = p;
        m_original_permission = KMemoryPermission::None;
        m_attribute = attr;
        m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
    }

    void UnshareToDevice(KMemoryPermission /*new_perm*/) {
        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
        const u16 prev_use_count{device_use_count--};
        ASSERT(prev_use_count > 0);
        if (prev_use_count == 1) {
            attribute &= ~KMemoryAttribute::DeviceShared;
        }
    }

private:
    constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
        constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
                                                       KMemoryAttribute::IpcLocked |
                                                       KMemoryAttribute::DeviceShared};
        return state == s && perm == p &&
               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
        constexpr auto AttributeIgnoreMask =
            KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
        return m_memory_state == s && m_permission == p &&
               (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
    }

    constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
               device_use_count == rhs.device_use_count;
        return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
               m_original_permission == rhs.m_original_permission &&
               m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
               m_device_use_count == rhs.m_device_use_count;
    }

    constexpr bool Contains(VAddr start) const {
        return GetAddress() <= start && start <= GetEndAddress();
    constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
        return this->HasSameProperties(rhs) &&
               (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
                   KMemoryBlockDisableMergeAttribute::None &&
               (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
                   KMemoryBlockDisableMergeAttribute::None;
    }

    constexpr void Add(std::size_t count) {
        ASSERT(count > 0);
        ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);

        num_pages += count;
    constexpr bool Contains(VAddr addr) const {
        return this->GetAddress() <= addr && addr <= this->GetEndAddress();
    }

    constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
                          KMemoryAttribute new_attribute) {
        ASSERT(original_perm == KMemoryPermission::None);
        ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
    constexpr void Add(const KMemoryBlock& added_block) {
        ASSERT(added_block.GetNumPages() > 0);
        ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
               this->GetEndAddress() + added_block.GetSize() - 1);

        state = new_state;
        perm = new_perm;

        attribute = static_cast<KMemoryAttribute>(
            new_attribute |
            (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
        m_num_pages += added_block.GetNumPages();
        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
            m_disable_merge_attribute | added_block.m_disable_merge_attribute);
        m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
    }

    constexpr KMemoryBlock Split(VAddr split_addr) {
        ASSERT(GetAddress() < split_addr);
        ASSERT(Contains(split_addr));
        ASSERT(Common::IsAligned(split_addr, PageSize));
    constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
                          bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
        ASSERT(m_original_permission == KMemoryPermission::None);
        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);

        KMemoryBlock block;
        block.addr = addr;
        block.num_pages = (split_addr - GetAddress()) / PageSize;
        block.state = state;
        block.ipc_lock_count = ipc_lock_count;
        block.device_use_count = device_use_count;
        block.perm = perm;
        block.original_perm = original_perm;
        block.attribute = attribute;
        m_memory_state = s;
        m_permission = p;
        m_attribute = static_cast<KMemoryAttribute>(
            a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));

        addr = split_addr;
        num_pages -= block.num_pages;
        if (set_disable_merge_attr && set_mask != 0) {
            m_disable_merge_attribute = m_disable_merge_attribute |
                                        static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
        }
        if (clear_mask != 0) {
            m_disable_merge_attribute = m_disable_merge_attribute &
                                        static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
        }
    }

        return block;
    constexpr void Split(KMemoryBlock* block, VAddr addr) {
        ASSERT(this->GetAddress() < addr);
        ASSERT(this->Contains(addr));
        ASSERT(Common::IsAligned(addr, PageSize));

        block->m_address = m_address;
        block->m_num_pages = (addr - this->GetAddress()) / PageSize;
        block->m_memory_state = m_memory_state;
        block->m_ipc_lock_count = m_ipc_lock_count;
        block->m_device_use_count = m_device_use_count;
        block->m_permission = m_permission;
        block->m_original_permission = m_original_permission;
        block->m_attribute = m_attribute;
        block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
        block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
        block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
        block->m_device_disable_merge_right_count = 0;

        m_address = addr;
        m_num_pages -= block->m_num_pages;

        m_ipc_disable_merge_count = 0;
        m_device_disable_merge_left_count = 0;
        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
    }

    constexpr void UpdateDeviceDisableMergeStateForShareLeft(
        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
        if (left) {
            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
            const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
            ASSERT(new_device_disable_merge_left_count > 0);
        }
    }

    constexpr void UpdateDeviceDisableMergeStateForShareRight(
        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
        if (right) {
            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
            const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
            ASSERT(new_device_disable_merge_right_count > 0);
        }
    }

    constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
                                                         bool right) {
        this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
        this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
    }

    constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                 bool right) {
        // We must either be shared or have a zero lock count.
        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
               m_device_use_count == 0);

        // Share.
        const u16 new_count = ++m_device_use_count;
        ASSERT(new_count > 0);

        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);

        this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
    }

    constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {

        if (left) {
            if (!m_device_disable_merge_left_count) {
                return;
            }
            --m_device_disable_merge_left_count;
        }

        m_device_disable_merge_left_count =
            std::min(m_device_disable_merge_left_count, m_device_use_count);

        if (m_device_disable_merge_left_count == 0) {
            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
        }
    }

    constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
        if (right) {
            const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
            ASSERT(old_device_disable_merge_right_count > 0);
            if (old_device_disable_merge_right_count == 1) {
                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
            }
        }
    }

    constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
                                                           bool right) {
        this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
    }

    constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                   bool right) {
        // We must be shared.
        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);

        // Unshare.
        const u16 old_count = m_device_use_count--;
        ASSERT(old_count > 0);

        if (old_count == 1) {
            m_attribute =
                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
        }

        this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
    }

    constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                        bool right) {

        // We must be shared.
        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);

        // Unshare.
        const u16 old_count = m_device_use_count--;
        ASSERT(old_count > 0);

        if (old_count == 1) {
            m_attribute =
                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
        }

        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
    }

    constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
        // We must either be locked or have a zero lock count.
        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
               m_ipc_lock_count == 0);

        // Lock.
        const u16 new_lock_count = ++m_ipc_lock_count;
        ASSERT(new_lock_count > 0);

        // If this is our first lock, update our permissions.
        if (new_lock_count == 1) {
            ASSERT(m_original_permission == KMemoryPermission::None);
            ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
                   (m_permission | KMemoryPermission::NotMapped));
            ASSERT((m_permission & KMemoryPermission::UserExecute) !=
                       KMemoryPermission::UserExecute ||
                   (new_perm == KMemoryPermission::UserRead));
            m_original_permission = m_permission;
            m_permission = static_cast<KMemoryPermission>(
                (new_perm & KMemoryPermission::IpcLockChangeMask) |
                (m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
        }
        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);

        if (left) {
            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
            const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
            ASSERT(new_ipc_disable_merge_count > 0);
        }
    }

    constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                [[maybe_unused]] bool right) {
        // We must be locked.
        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);

        // Unlock.
        const u16 old_lock_count = m_ipc_lock_count--;
        ASSERT(old_lock_count > 0);

        // If this is our last unlock, update our permissions.
        if (old_lock_count == 1) {
            ASSERT(m_original_permission != KMemoryPermission::None);
            m_permission = m_original_permission;
            m_original_permission = KMemoryPermission::None;
            m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
        }

        if (left) {
            const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
            ASSERT(old_ipc_disable_merge_count > 0);
            if (old_ipc_disable_merge_count == 1) {
                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
            }
        }
    }

    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
        return m_disable_merge_attribute;
    }
};
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
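Note on this hunk: KMemoryBlock now derives from Common::IntrusiveRedBlackTreeBaseNode, so the tree links live inside the block itself, blocks come from a slab allocator, and the trivially-destructible assert keeps freeing them cheap. The Compare function (elided by the hunk split above) has to order blocks by address range so that a one-page probe key "inside" a block compares equal to it. A rough sketch of that ordering contract, using std::set as a stand-in for the intrusive tree (illustrative only, not yuzu's container):

#include <cstdint>
#include <set>

struct Block {
    std::uintptr_t address;
    std::size_t num_pages;
};

// A block is "less" than another only when its whole range lies below the
// other's start; overlapping key/block pairs compare equivalent, so find()
// locates the block containing an address.
struct BlockLess {
    bool operator()(const Block& lhs, const Block& rhs) const {
        constexpr std::size_t PageSize = 0x1000;
        return lhs.address + lhs.num_pages * PageSize <= rhs.address;
    }
};

int main() {
    std::set<Block, BlockLess> tree;
    tree.insert({0x0000, 4}); // [0x0000, 0x4000)
    tree.insert({0x4000, 4}); // [0x4000, 0x8000)

    // Probe with a 1-page key, mirroring FindIterator's
    // KMemoryBlock(address, 1, ...) probe in the new header.
    auto it = tree.find(Block{0x5000, 1});
    return it != tree.end() ? 0 : 1; // found: exits 0
}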
@@ -2,221 +2,336 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/memory_types.h"

namespace Kernel {

KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
    : start_addr{start_addr_}, end_addr{end_addr_} {
    const u64 num_pages{(end_addr - start_addr) / PageSize};
    memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
                                   KMemoryPermission::None, KMemoryAttribute::None);
KMemoryBlockManager::KMemoryBlockManager() = default;

Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
    // Allocate a block to encapsulate the address space, insert it into the tree.
    KMemoryBlock* start_block = slab_manager->Allocate();
    R_UNLESS(start_block != nullptr, ResultOutOfResource);

    // Set our start and end.
    m_start_address = st;
    m_end_address = nd;
    ASSERT(Common::IsAligned(m_start_address, PageSize));
    ASSERT(Common::IsAligned(m_end_address, PageSize));

    // Initialize and insert the block.
    start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
                            KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
    m_memory_block_tree.insert(*start_block);

    R_SUCCEED();
}

KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
    auto node{memory_block_tree.begin()};
    while (node != end()) {
        const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
        if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
            return node;
        }
        node = std::next(node);
void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
                                   HostUnmapCallback&& host_unmap_callback) {
    // Erase every block until we have none left.
    auto it = m_memory_block_tree.begin();
    while (it != m_memory_block_tree.end()) {
        KMemoryBlock* block = std::addressof(*it);
        it = m_memory_block_tree.erase(it);
        slab_manager->Free(block);
        host_unmap_callback(block->GetAddress(), block->GetSize());
    }
    return end();

    ASSERT(m_memory_block_tree.empty());
}

VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                                        std::size_t num_pages, std::size_t align,
                                        std::size_t offset, std::size_t guard_pages) {
    if (num_pages == 0) {
        return {};
    }
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
                                        size_t num_pages, size_t alignment, size_t offset,
                                        size_t guard_pages) const {
    if (num_pages > 0) {
        const VAddr region_end = region_start + region_num_pages * PageSize;
        const VAddr region_last = region_end - 1;
        for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
             it++) {
            const KMemoryInfo info = it->GetMemoryInfo();
            if (region_last < info.GetAddress()) {
                break;
            }
            if (info.m_state != KMemoryState::Free) {
                continue;
            }

    const VAddr region_end{region_start + region_num_pages * PageSize};
    const VAddr region_last{region_end - 1};
    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
        const auto info{it->GetMemoryInfo()};
        if (region_last < info.GetAddress()) {
            break;
        }
            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
            area += guard_pages * PageSize;

        if (info.state != KMemoryState::Free) {
            continue;
        }
            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
            area = (area <= offset_area) ? offset_area : offset_area + alignment;

        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
        area += guard_pages * PageSize;
            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
            const VAddr area_last = area_end - 1;

        const VAddr offset_area{Common::AlignDown(area, align) + offset};
        area = (area <= offset_area) ? offset_area : offset_area + align;

        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
        const VAddr area_last{area_end - 1};

        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
            area_last <= info.GetLastAddress()) {
            return area;
            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
                area_last <= info.GetLastAddress()) {
                return area;
            }
        }
    }

    return {};
}
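Note on this hunk: the candidate-address arithmetic in FindFreeArea is easy to misread. After skipping guard pages, the scan snaps the candidate down to the requested alignment, re-applies the offset, and bumps up by one alignment step if the snap landed below the candidate. A small self-contained check of that arithmetic (4 KiB pages assumed; a sketch, not the kernel's code):

#include <cassert>
#include <cstdint>

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value - value % align;
}

// Mirrors the per-block candidate computation in FindFreeArea.
constexpr std::uint64_t Candidate(std::uint64_t area, std::uint64_t alignment,
                                  std::uint64_t offset) {
    const std::uint64_t offset_area = AlignDown(area, alignment) + offset;
    return (area <= offset_area) ? offset_area : offset_area + alignment;
}

int main() {
    // 0x1234 rounded into a 0x1000-aligned slot with offset 0 -> next boundary up.
    assert(Candidate(0x1234, 0x1000, 0) == 0x2000);
    // Already aligned: stays put.
    assert(Candidate(0x2000, 0x1000, 0) == 0x2000);
    // With offset 0x800, 0x1234 fits at 0x1800 (>= area), so no bump is needed.
    assert(Candidate(0x1234, 0x1000, 0x800) == 0x1800);
    return 0;
}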

void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
                                 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
                                 KMemoryState state, KMemoryPermission perm,
                                 KMemoryAttribute attribute) {
    const VAddr update_end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};
void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
                                            VAddr address, size_t num_pages) {
    // Find the iterator now that we've updated.
    iterator it = this->FindIterator(address);
    if (address != m_start_address) {
        it--;
    }

    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;

    while (node != memory_block_tree.end()) {
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < update_end_addr) {
            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
                node = next_node;
                continue;
            }

            iterator new_node{node};
            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (update_end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= update_end_addr - 1) {
    // Coalesce blocks that we can.
    while (true) {
        iterator prev = it++;
        if (it == m_memory_block_tree.end()) {
            break;
        }

        node = next_node;
    }
}

void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
                                 KMemoryPermission perm, KMemoryAttribute attribute) {
    const VAddr update_end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < update_end_addr) {
            iterator new_node{node};

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (update_end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        if (prev->CanMergeWith(*it)) {
            KMemoryBlock* block = std::addressof(*it);
            m_memory_block_tree.erase(it);
            prev->Add(*block);
            allocator->Free(block);
            it = prev;
        }

        if (cur_end_addr - 1 >= update_end_addr - 1) {
        if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
            break;
        }

        node = next_node;
    }
}

void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
                                 KMemoryAttribute attr,
                                 KMemoryBlockDisableMergeAttribute set_disable_attr,
                                 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
           KMemoryAttribute::None);

    VAddr cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();
        if (it->HasProperties(state, perm, attr)) {
            // If we already have the right properties, just advance.
            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
                remaining_pages = 0;
                cur_address += remaining_size;
            } else {
                remaining_pages =
                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
                cur_address = cur_info.GetEndAddress();
            }
        } else {
            // If we need to, create a new block before and insert it.
            if (cur_info.GetAddress() != cur_address) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address);
                it = m_memory_block_tree.insert(*new_block);
                it++;

                cur_info = it->GetMemoryInfo();
                cur_address = cur_info.GetAddress();
            }

            // If we need to, create a new block after and insert it.
            if (cur_info.GetSize() > remaining_size) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address + remaining_size);
                it = m_memory_block_tree.insert(*new_block);

                cur_info = it->GetMemoryInfo();
            }

            // Update block state.
            it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
                       static_cast<u8>(clear_disable_attr));
            cur_address += cur_info.GetSize();
            remaining_pages -= cur_info.GetNumPages();
        }
        it++;
    }

    this->CoalesceForUpdate(allocator, address, num_pages);
}

void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
                                        VAddr address, size_t num_pages, KMemoryState test_state,
                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
                                        KMemoryState state, KMemoryPermission perm,
                                        KMemoryAttribute attr) {
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
           KMemoryAttribute::None);

    VAddr cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();
        if (it->HasProperties(test_state, test_perm, test_attr) &&
            !it->HasProperties(state, perm, attr)) {
            // If we need to, create a new block before and insert it.
            if (cur_info.GetAddress() != cur_address) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address);
                it = m_memory_block_tree.insert(*new_block);
                it++;

                cur_info = it->GetMemoryInfo();
                cur_address = cur_info.GetAddress();
            }

            // If we need to, create a new block after and insert it.
            if (cur_info.GetSize() > remaining_size) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address + remaining_size);
                it = m_memory_block_tree.insert(*new_block);

                cur_info = it->GetMemoryInfo();
            }

            // Update block state.
            it->Update(state, perm, attr, false, 0, 0);
            cur_address += cur_info.GetSize();
            remaining_pages -= cur_info.GetNumPages();
        } else {
            // If we already have the right properties, just advance.
            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
                remaining_pages = 0;
                cur_address += remaining_size;
            } else {
                remaining_pages =
                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
                cur_address = cur_info.GetEndAddress();
            }
        }
        it++;
    }

    this->CoalesceForUpdate(allocator, address, num_pages);
}

void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                                     size_t num_pages, MemoryBlockLockFunction lock_func,
                                     KMemoryPermission perm) {
    const VAddr update_end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(address, PageSize));

    while (node != memory_block_tree.end()) {
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
    VAddr cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

        if (addr < cur_end_addr && cur_addr < update_end_addr) {
            iterator new_node{node};
    const VAddr end_address = address + (num_pages * PageSize);

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }
    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();

            if (update_end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
            }
        // If we need to, create a new block before and insert it.
        if (cur_info.m_address != cur_address) {
            KMemoryBlock* new_block = allocator->Allocate();

            lock_func(new_node, perm);
            it->Split(new_block, cur_address);
            it = m_memory_block_tree.insert(*new_block);
            it++;

            MergeAdjacent(new_node, next_node);
            cur_info = it->GetMemoryInfo();
            cur_address = cur_info.GetAddress();
        }

        if (cur_end_addr - 1 >= update_end_addr - 1) {
            break;
        if (cur_info.GetSize() > remaining_size) {
            // If we need to, create a new block after and insert it.
            KMemoryBlock* new_block = allocator->Allocate();

            it->Split(new_block, cur_address + remaining_size);
            it = m_memory_block_tree.insert(*new_block);

            cur_info = it->GetMemoryInfo();
        }

        node = next_node;
        // Call the locked update function.
        (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
                                          cur_info.GetEndAddress() == end_address);
        cur_address += cur_info.GetSize();
        remaining_pages -= cur_info.GetNumPages();
        it++;
    }

    this->CoalesceForUpdate(allocator, address, num_pages);
}
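Note on this hunk: UpdateLock now takes a pointer-to-member (MemoryBlockLockFunction) instead of a std::function, so the dispatch `(std::addressof(*it)->*lock_func)(...)` is a direct member call with no type erasure or allocation. A tiny demo of that syntax on a stand-in type, since it is rarely seen:

#include <iostream>

struct Block {
    int lock_count = 0;
    void Lock(bool left, bool right) {
        ++lock_count;
        std::cout << "lock left=" << left << " right=" << right << '\n';
    }
    void Unlock(bool left, bool right) { --lock_count; }
};

// Same shape as KMemoryBlockManager::MemoryBlockLockFunction.
using LockFn = void (Block::*)(bool left, bool right);

void Apply(Block* b, LockFn fn, bool left, bool right) {
    (b->*fn)(left, right); // the dispatch used by UpdateLock's call site
}

int main() {
    Block b;
    Apply(&b, &Block::Lock, true, false);
    Apply(&b, &Block::Unlock, true, false);
    return b.lock_count; // back to 0
}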

void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
    const_iterator it{FindIterator(start)};
    KMemoryInfo info{};
    do {
        info = it->GetMemoryInfo();
        func(info);
        it = std::next(it);
    } while (info.addr + info.size - 1 < end - 1 && it != cend());
}
// Debug.
bool KMemoryBlockManager::CheckState() const {
    // Loop over every block, ensuring that we are sorted and coalesced.
    auto it = m_memory_block_tree.cbegin();
    auto prev = it++;
    while (it != m_memory_block_tree.cend()) {
        const KMemoryInfo prev_info = prev->GetMemoryInfo();
        const KMemoryInfo cur_info = it->GetMemoryInfo();

void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    KMemoryBlock* block{&(*it)};

    auto EraseIt = [&](const iterator it_to_erase) {
        if (next_it == it_to_erase) {
            next_it = std::next(next_it);
        // Sequential blocks which can be merged should be merged.
        if (prev->CanMergeWith(*it)) {
            return false;
        }
        memory_block_tree.erase(it_to_erase);
    };

    if (it != memory_block_tree.begin()) {
        KMemoryBlock* prev{&(*std::prev(it))};
        // Sequential blocks should be sequential.
        if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
            return false;
        }

        if (block->HasSameProperties(*prev)) {
            const iterator prev_it{std::prev(it)};
            // If the block is ipc locked, it must have a count.
            if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
                cur_info.m_ipc_lock_count == 0) {
                return false;
            }

            prev->Add(block->GetNumPages());
            EraseIt(it);
            // If the block is device shared, it must have a count.
            if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
                cur_info.m_device_use_count == 0) {
                return false;
            }

            it = prev_it;
            block = prev;
        // Advance the iterator.
        prev = it++;
    }

    // Our loop will miss checking the last block, potentially, so check it.
    if (prev != m_memory_block_tree.cend()) {
        const KMemoryInfo prev_info = prev->GetMemoryInfo();
        // If the block is ipc locked, it must have a count.
        if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
            prev_info.m_ipc_lock_count == 0) {
            return false;
        }

        // If the block is device shared, it must have a count.
        if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
            prev_info.m_device_use_count == 0) {
            return false;
        }
    }

    if (it != cend()) {
        const KMemoryBlock* const next{&(*std::next(it))};

        if (block->HasSameProperties(*next)) {
            block->Add(next->GetNumPages());
            EraseIt(std::next(it));
        }
    }
    return true;
}

} // namespace Kernel
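Note on this file: CheckState's invariant (blocks contiguous, no two mergeable neighbors left unmerged) is exactly what CoalesceForUpdate restores after every split. A minimal model of that merge pass over a flat vector, under the simplifying assumption that "same properties" is a single int (the real check also compares permissions, attributes, and the disable-merge bits):

#include <cassert>
#include <vector>

struct Range {
    unsigned long address;
    unsigned long size;
    int props; // stand-in for state/perm/attr
};

void Coalesce(std::vector<Range>& ranges) {
    for (std::size_t i = 0; i + 1 < ranges.size();) {
        Range& prev = ranges[i];
        const Range& next = ranges[i + 1];
        if (prev.address + prev.size == next.address && prev.props == next.props) {
            prev.size += next.size; // same effect as prev->Add(*block)
            ranges.erase(ranges.begin() + i + 1);
        } else {
            ++i;
        }
    }
}

int main() {
    std::vector<Range> r{{0x0000, 0x1000, 1}, {0x1000, 0x1000, 1}, {0x2000, 0x1000, 2}};
    Coalesce(r);
    assert(r.size() == 2 && r[0].size == 0x2000); // first two merged, third kept
    return 0;
}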

@@ -4,63 +4,154 @@
#pragma once

#include <functional>
#include <list>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_block.h"

namespace Kernel {

class KMemoryBlockManagerUpdateAllocator {
public:
    static constexpr size_t MaxBlocks = 2;

private:
    KMemoryBlock* m_blocks[MaxBlocks];
    size_t m_index;
    KMemoryBlockSlabManager* m_slab_manager;

private:
    Result Initialize(size_t num_blocks) {
        // Check num blocks.
        ASSERT(num_blocks <= MaxBlocks);

        // Set index.
        m_index = MaxBlocks - num_blocks;

        // Allocate the blocks.
        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
            m_blocks[m_index + i] = m_slab_manager->Allocate();
            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
        }

        R_SUCCEED();
    }

public:
    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
                                       size_t num_blocks = MaxBlocks)
        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
        *out_result = this->Initialize(num_blocks);
    }

    ~KMemoryBlockManagerUpdateAllocator() {
        for (const auto& block : m_blocks) {
            if (block != nullptr) {
                m_slab_manager->Free(block);
            }
        }
    }

    KMemoryBlock* Allocate() {
        ASSERT(m_index < MaxBlocks);
        ASSERT(m_blocks[m_index] != nullptr);
        KMemoryBlock* block = nullptr;
        std::swap(block, m_blocks[m_index++]);
        return block;
    }

    void Free(KMemoryBlock* block) {
        ASSERT(m_index <= MaxBlocks);
        ASSERT(block != nullptr);
        if (m_index == 0) {
            m_slab_manager->Free(block);
        } else {
            m_blocks[--m_index] = block;
        }
    }
};
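Note on this class: MaxBlocks = 2 because, as far as I can tell from the Update paths above, one contiguous range update splits at most two existing blocks — one at each end of the range. Pre-allocating both up front means allocation failure surfaces in the constructor's Result before the tree is ever modified. A sketch of the intended call pattern, with a simplified Result type standing in for the real one from core/hle/result.h:

#include <cstdio>

struct Result {
    int code = 0;
    bool IsSuccess() const { return code == 0; }
};

struct Allocator {
    // Stand-in for KMemoryBlockManagerUpdateAllocator: grab both blocks eagerly.
    explicit Allocator(Result* out) { out->code = 0; /* allocate 2 slab blocks here */ }
};

Result DoUpdate() {
    Result alloc_result;
    Allocator allocator(&alloc_result);
    if (!alloc_result.IsSuccess()) {
        return alloc_result; // fail before touching the block tree
    }
    // ... perform Update()/UpdateLock() using the pre-allocated blocks ...
    return Result{};
}

int main() {
    std::printf("update %s\n", DoUpdate().IsSuccess() ? "ok" : "failed");
}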

class KMemoryBlockManager final {
public:
    using MemoryBlockTree = std::list<KMemoryBlock>;
    using MemoryBlockTree =
        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
                                                           bool right);
    using iterator = MemoryBlockTree::iterator;
    using const_iterator = MemoryBlockTree::const_iterator;

public:
    KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
    KMemoryBlockManager();

    using HostUnmapCallback = std::function<void(VAddr, u64)>;

    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);

    iterator end() {
        return memory_block_tree.end();
        return m_memory_block_tree.end();
    }
    const_iterator end() const {
        return memory_block_tree.end();
        return m_memory_block_tree.end();
    }
    const_iterator cend() const {
        return memory_block_tree.cend();
        return m_memory_block_tree.cend();
    }

    iterator FindIterator(VAddr addr);
    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
                       size_t alignment, size_t offset, size_t guard_pages) const;

    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                       std::size_t align, std::size_t offset, std::size_t guard_pages);
    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
                KMemoryBlockDisableMergeAttribute set_disable_attr,
                KMemoryBlockDisableMergeAttribute clear_disable_attr);
    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);

    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
                KMemoryPermission perm, KMemoryAttribute attribute);
    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
                       KMemoryAttribute attr);

    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
                KMemoryPermission perm = KMemoryPermission::None,
                KMemoryAttribute attribute = KMemoryAttribute::None);
    iterator FindIterator(VAddr address) const {
        return m_memory_block_tree.find(KMemoryBlock(
            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
    }

    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
                    KMemoryPermission perm);
    const KMemoryBlock* FindBlock(VAddr address) const {
        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
            return std::addressof(*it);
        }

    using IterateFunc = std::function<void(const KMemoryInfo&)>;
    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
        return nullptr;
    }

    KMemoryBlock& FindBlock(VAddr addr) {
        return *FindIterator(addr);
    // Debug.
    bool CheckState() const;

private:
    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                           size_t num_pages);

    MemoryBlockTree m_memory_block_tree;
    VAddr m_start_address{};
    VAddr m_end_address{};
};

class KScopedMemoryBlockManagerAuditor {
public:
    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
        ASSERT(m_manager->CheckState());
    }
    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
    ~KScopedMemoryBlockManagerAuditor() {
        ASSERT(m_manager->CheckState());
    }

private:
    void MergeAdjacent(iterator it, iterator& next_it);

    [[maybe_unused]] const VAddr start_addr;
    [[maybe_unused]] const VAddr end_addr;

    MemoryBlockTree memory_block_tree;
    KMemoryBlockManager* m_manager;
};

} // namespace Kernel
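Note on the auditor: KScopedMemoryBlockManagerAuditor is plain RAII — it asserts CheckState() on construction and again on destruction, so every public mutation in the manager is bracketed by tree validation in debug builds. The shape, reduced to its essentials with illustrative stand-ins:

#include <cassert>

struct Manager {
    bool CheckState() const { return true; } // the real version walks the whole tree
};

class ScopedAuditor {
public:
    explicit ScopedAuditor(Manager* m) : m_manager(m) { assert(m_manager->CheckState()); }
    ~ScopedAuditor() { assert(m_manager->CheckState()); }

private:
    Manager* m_manager;
};

void Update(Manager* m) {
    ScopedAuditor auditor(m); // validates before and after the mutation below
    // ... split / merge blocks ...
}

int main() {
    Manager m;
    Update(&m);
}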

@@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag

    // Set all the allocated memory.
    for (const auto& block : out->Nodes()) {
        std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
                    block.GetSize());
    }
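Note on this hunk: the change only adapts the call site to DeviceMemory's accessor becoming a template (GetPointer<void>), as the k_page_buffer.cpp hunk below does with GetPointer<KPageBuffer>. A hedged sketch of why such a signature change forces these edits, using a hypothetical minimal DeviceMemory rather than yuzu's:

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical stand-in: a typed accessor replaces a raw-u8*-returning one.
class DeviceMemory {
public:
    explicit DeviceMemory(std::size_t size) : m_buffer(size) {}

    template <typename T>
    T* GetPointer(std::uintptr_t addr) {
        return reinterpret_cast<T*>(m_buffer.data() + addr);
    }

private:
    std::vector<std::uint8_t> m_buffer;
};

int main() {
    DeviceMemory mem(0x1000);
    // Old callers wrote mem.GetPointer(0x100); with a template, the element
    // type must be spelled out, e.g. void for raw fills:
    std::memset(mem.GetPointer<void>(0x100), 0x58, 0x10);
    return *mem.GetPointer<std::uint8_t>(0x100) == 0x58 ? 0 : 1;
}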

@@ -12,7 +12,7 @@ namespace Kernel {

KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
    ASSERT(Common::IsAligned(phys_addr, PageSize));
    return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
}

} // namespace Kernel

@@ -13,6 +13,7 @@ namespace Kernel {

class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
public:
    explicit KPageBuffer(KernelCore&) {}
    KPageBuffer() = default;

    static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);

File diff suppressed because it is too large
@@ -9,8 +9,10 @@

#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@@ -34,58 +36,66 @@ public:
    ~KPageTable();

    Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
    Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
                                VAddr code_addr, size_t code_size,
                                KMemoryBlockSlabManager* mem_block_slab_manager,
                                KMemoryManager::Pool pool);

    void Finalize();

    Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
                          KMemoryPermission perm);
    Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
    Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
                           ICacheInvalidationStrategy icache_invalidation_strategy);
    Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
    Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
                              VAddr src_addr);
    Result MapPhysicalMemory(VAddr addr, std::size_t size);
    Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
    Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
    Result MapPhysicalMemory(VAddr addr, size_t size);
    Result UnmapPhysicalMemory(VAddr addr, size_t size);
    Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
    Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
                    KMemoryPermission perm);
    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
                    KMemoryState state, KMemoryPermission perm) {
        return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
                              this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
                              state, perm);
        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
                                this->GetRegionAddress(state),
                                this->GetRegionSize(state) / PageSize, state, perm));
    }
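Note on this hunk: several plain `return expr;` statements become `R_RETURN(expr);` throughout this diff, alongside R_SUCCEED() and R_UNLESS(). As I read it, these are the codebase's result-propagation macros, in the style of Atmosphere's result helpers; the real definitions may additionally hook logging or abort-on-error. A simplified sketch of the pattern they abstract, with hypothetical minimal definitions:

#include <cstdio>

struct Result {
    int raw = 0;
    bool IsSuccess() const { return raw == 0; }
};

constexpr Result ResultSuccess{0};

// Hypothetical reductions of the real macros.
#define R_SUCCEED() return ResultSuccess
#define R_RETURN(expr) return (expr)

Result MapPagesImpl() {
    R_SUCCEED();
}

Result MapPages() {
    R_RETURN(MapPagesImpl()); // forwards the callee's Result unchanged
}

int main() {
    std::printf("%s\n", MapPages().IsSuccess() ? "mapped" : "error");
}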
    Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
    Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
    Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
    Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
    KMemoryInfo QueryInfo(VAddr addr);
    Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
    Result ResetTransferMemory(VAddr addr, std::size_t size);
    Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
    Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
    Result SetMaxHeapSize(std::size_t size);
    Result SetHeapSize(VAddr* out, std::size_t size);
    ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                          bool is_map_only, VAddr region_start,
                                          std::size_t region_num_pages, KMemoryState state,
                                          KMemoryPermission perm, PAddr map_addr = 0);
    Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
    Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
    Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
    Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
    Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
    Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
    Result SetMaxHeapSize(size_t size);
    Result SetHeapSize(VAddr* out, size_t size);
    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
                                          VAddr region_start, size_t region_num_pages,
                                          KMemoryState state, KMemoryPermission perm,
                                          PAddr map_addr = 0);

    Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
                                        bool is_aligned);
    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);

    Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);

    Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
    Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
    Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
                                KMemoryState state_mask, KMemoryState state,
                                KMemoryPermission perm_mask, KMemoryPermission perm,
                                KMemoryAttribute attr_mask, KMemoryAttribute attr);

    Common::PageTable& PageTableImpl() {
        return page_table_impl;
        return *m_page_table_impl;
    }

    const Common::PageTable& PageTableImpl() const {
        return page_table_impl;
        return *m_page_table_impl;
    }

    bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
    bool CanContain(VAddr addr, size_t size, KMemoryState state) const;

private:
    enum class OperationType : u32 {
@@ -96,67 +106,65 @@ private:
        ChangePermissionsAndRefresh,
    };

    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
                                                                KMemoryAttribute::IpcLocked |
                                                                KMemoryAttribute::DeviceShared;
    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;

    Result InitializeMemoryLayout(VAddr start, VAddr end);
    Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
                    bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
                    KMemoryState state, KMemoryPermission perm);
    Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
    bool IsRegionMapped(VAddr address, u64 size);
    bool IsRegionContiguous(VAddr addr, u64 size) const;
    void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
    void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
    KMemoryInfo QueryInfoImpl(VAddr addr);
    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
                                std::size_t align);
    Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
    VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
                                size_t align);
    Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
                   OperationType operation);
    Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
                   OperationType operation, PAddr map_addr = 0);
    Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
                   PAddr map_addr = 0);
    VAddr GetRegionAddress(KMemoryState state) const;
    std::size_t GetRegionSize(KMemoryState state) const;
    size_t GetRegionSize(KMemoryState state) const;

    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                       std::size_t alignment, std::size_t offset, std::size_t guard_pages);
    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
                       size_t alignment, size_t offset, size_t guard_pages);

    Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
                                      KMemoryState state_mask, KMemoryState state,
                                      KMemoryPermission perm_mask, KMemoryPermission perm,
                                      KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
    Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
    Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
                                      KMemoryState state, KMemoryPermission perm_mask,
                                      KMemoryPermission perm, KMemoryAttribute attr_mask,
                                      KMemoryAttribute attr) const {
        return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
                                                perm, attr_mask, attr);
        R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
                                                  perm, attr_mask, attr));
    }

    Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
                            KMemoryPermission perm_mask, KMemoryPermission perm,
                            KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                            KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
                            std::size_t size, KMemoryState state_mask, KMemoryState state,
                            KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
                            size_t size, KMemoryState state_mask, KMemoryState state,
                            KMemoryPermission perm_mask, KMemoryPermission perm,
                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
    Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
    Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
                            KMemoryState state_mask, KMemoryState state,
                            KMemoryPermission perm_mask, KMemoryPermission perm,
                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
        return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
                                state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
        R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
                                  state_mask, state, perm_mask, perm, attr_mask, attr,
                                  ignore_attr));
    }
    Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
    Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
                            KMemoryPermission perm_mask, KMemoryPermission perm,
                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
        return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
                                      attr_mask, attr, ignore_attr);
        R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
                                        attr_mask, attr, ignore_attr));
    }
|
||||
|
||||
Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
|
||||
@@ -174,13 +182,13 @@ private:
|
||||
bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
|
||||
|
||||
bool IsLockedByCurrentThread() const {
|
||||
return general_lock.IsLockedByCurrentThread();
|
||||
return m_general_lock.IsLockedByCurrentThread();
|
||||
}
|
||||
|
||||
bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
|
||||
ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
|
||||
return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
|
||||
}
|
||||
|
||||
bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
|
||||
@@ -191,95 +199,93 @@ private:
|
||||
return *out != 0;
|
||||
}
|
||||
|
||||
mutable KLightLock general_lock;
|
||||
mutable KLightLock map_physical_memory_lock;
|
||||
|
||||
std::unique_ptr<KMemoryBlockManager> block_manager;
|
||||
mutable KLightLock m_general_lock;
|
||||
mutable KLightLock m_map_physical_memory_lock;
|
||||
|
||||
public:
|
||||
constexpr VAddr GetAddressSpaceStart() const {
|
||||
return address_space_start;
|
||||
return m_address_space_start;
|
||||
}
|
||||
constexpr VAddr GetAddressSpaceEnd() const {
|
||||
return address_space_end;
|
||||
return m_address_space_end;
|
||||
}
|
||||
constexpr std::size_t GetAddressSpaceSize() const {
|
||||
return address_space_end - address_space_start;
|
||||
constexpr size_t GetAddressSpaceSize() const {
|
||||
return m_address_space_end - m_address_space_start;
|
||||
}
|
||||
constexpr VAddr GetHeapRegionStart() const {
|
||||
return heap_region_start;
|
||||
return m_heap_region_start;
|
||||
}
|
||||
constexpr VAddr GetHeapRegionEnd() const {
|
||||
return heap_region_end;
|
||||
return m_heap_region_end;
|
||||
}
|
||||
constexpr std::size_t GetHeapRegionSize() const {
|
||||
return heap_region_end - heap_region_start;
|
||||
constexpr size_t GetHeapRegionSize() const {
|
||||
return m_heap_region_end - m_heap_region_start;
|
||||
}
|
||||
constexpr VAddr GetAliasRegionStart() const {
|
||||
return alias_region_start;
|
||||
return m_alias_region_start;
|
||||
}
|
||||
constexpr VAddr GetAliasRegionEnd() const {
|
||||
return alias_region_end;
|
||||
return m_alias_region_end;
|
||||
}
|
||||
constexpr std::size_t GetAliasRegionSize() const {
|
||||
return alias_region_end - alias_region_start;
|
||||
constexpr size_t GetAliasRegionSize() const {
|
||||
return m_alias_region_end - m_alias_region_start;
|
||||
}
|
||||
constexpr VAddr GetStackRegionStart() const {
|
||||
return stack_region_start;
|
||||
return m_stack_region_start;
|
||||
}
|
||||
constexpr VAddr GetStackRegionEnd() const {
|
||||
return stack_region_end;
|
||||
return m_stack_region_end;
|
||||
}
|
||||
constexpr std::size_t GetStackRegionSize() const {
|
||||
return stack_region_end - stack_region_start;
|
||||
constexpr size_t GetStackRegionSize() const {
|
||||
return m_stack_region_end - m_stack_region_start;
|
||||
}
|
||||
constexpr VAddr GetKernelMapRegionStart() const {
|
||||
return kernel_map_region_start;
|
||||
return m_kernel_map_region_start;
|
||||
}
|
||||
constexpr VAddr GetKernelMapRegionEnd() const {
|
||||
return kernel_map_region_end;
|
||||
return m_kernel_map_region_end;
|
||||
}
|
||||
constexpr VAddr GetCodeRegionStart() const {
|
||||
return code_region_start;
|
||||
return m_code_region_start;
|
||||
}
|
||||
constexpr VAddr GetCodeRegionEnd() const {
|
||||
return code_region_end;
|
||||
return m_code_region_end;
|
||||
}
|
||||
constexpr VAddr GetAliasCodeRegionStart() const {
|
||||
return alias_code_region_start;
|
||||
return m_alias_code_region_start;
|
||||
}
|
||||
constexpr VAddr GetAliasCodeRegionSize() const {
|
||||
return alias_code_region_end - alias_code_region_start;
|
||||
return m_alias_code_region_end - m_alias_code_region_start;
|
||||
}
|
||||
std::size_t GetNormalMemorySize() {
|
||||
KScopedLightLock lk(general_lock);
|
||||
return GetHeapSize() + mapped_physical_memory_size;
|
||||
size_t GetNormalMemorySize() {
|
||||
KScopedLightLock lk(m_general_lock);
|
||||
return GetHeapSize() + m_mapped_physical_memory_size;
|
||||
}
|
||||
constexpr std::size_t GetAddressSpaceWidth() const {
|
||||
return address_space_width;
|
||||
constexpr size_t GetAddressSpaceWidth() const {
|
||||
return m_address_space_width;
|
||||
}
|
||||
constexpr std::size_t GetHeapSize() const {
|
||||
return current_heap_end - heap_region_start;
|
||||
constexpr size_t GetHeapSize() const {
|
||||
return m_current_heap_end - m_heap_region_start;
|
||||
}
|
||||
constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
|
||||
return address_space_start <= address && address + size - 1 <= address_space_end - 1;
|
||||
constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
|
||||
return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
|
||||
}
|
||||
constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
|
||||
return alias_region_start > address || address + size - 1 > alias_region_end - 1;
|
||||
constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
|
||||
return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
|
||||
}
|
||||
constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
|
||||
return stack_region_start > address || address + size - 1 > stack_region_end - 1;
|
||||
constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
|
||||
return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
|
||||
}
|
||||
constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
|
||||
constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
|
||||
return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
|
||||
}
|
||||
constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
|
||||
return address + size > heap_region_start && heap_region_end > address;
|
||||
constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
|
||||
return address + size > m_heap_region_start && m_heap_region_end > address;
|
||||
}
|
||||
constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
|
||||
return address + size > alias_region_start && alias_region_end > address;
|
||||
constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
|
||||
return address + size > m_alias_region_start && m_alias_region_end > address;
|
||||
}
|
||||
constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
|
||||
constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
|
||||
if (IsInvalidRegion(address, size)) {
|
||||
return true;
|
||||
}
|
||||
@@ -291,73 +297,78 @@ public:
|
||||
}
|
||||
return {};
|
||||
}
|
||||
constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
|
||||
constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
|
||||
return !IsOutsideASLRRegion(address, size);
|
||||
}
|
||||
constexpr std::size_t GetNumGuardPages() const {
|
||||
constexpr size_t GetNumGuardPages() const {
|
||||
return IsKernel() ? 1 : 4;
|
||||
}
|
||||
PAddr GetPhysicalAddr(VAddr addr) const {
|
||||
const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
|
||||
const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
|
||||
ASSERT(backing_addr);
|
||||
return backing_addr + addr;
|
||||
}
|
||||
constexpr bool Contains(VAddr addr) const {
|
||||
return address_space_start <= addr && addr <= address_space_end - 1;
|
||||
return m_address_space_start <= addr && addr <= m_address_space_end - 1;
|
||||
}
|
||||
constexpr bool Contains(VAddr addr, std::size_t size) const {
|
||||
return address_space_start <= addr && addr < addr + size &&
|
||||
addr + size - 1 <= address_space_end - 1;
|
||||
constexpr bool Contains(VAddr addr, size_t size) const {
|
||||
return m_address_space_start <= addr && addr < addr + size &&
|
||||
addr + size - 1 <= m_address_space_end - 1;
|
||||
}
|
||||
|
||||
private:
|
||||
constexpr bool IsKernel() const {
|
||||
return is_kernel;
|
||||
return m_is_kernel;
|
||||
}
|
||||
constexpr bool IsAslrEnabled() const {
|
||||
return is_aslr_enabled;
|
||||
return m_enable_aslr;
|
||||
}
|
||||
|
||||
constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
|
||||
return (address_space_start <= addr) &&
|
||||
(num_pages <= (address_space_end - address_space_start) / PageSize) &&
|
||||
(addr + num_pages * PageSize - 1 <= address_space_end - 1);
|
||||
constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
|
||||
return (m_address_space_start <= addr) &&
|
||||
(num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
|
||||
(addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
|
||||
}
|
||||
|
||||
private:
|
||||
VAddr address_space_start{};
|
||||
VAddr address_space_end{};
|
||||
VAddr heap_region_start{};
|
||||
VAddr heap_region_end{};
|
||||
VAddr current_heap_end{};
|
||||
VAddr alias_region_start{};
|
||||
VAddr alias_region_end{};
|
||||
VAddr stack_region_start{};
|
||||
VAddr stack_region_end{};
|
||||
VAddr kernel_map_region_start{};
|
||||
VAddr kernel_map_region_end{};
|
||||
VAddr code_region_start{};
|
||||
VAddr code_region_end{};
|
||||
VAddr alias_code_region_start{};
|
||||
VAddr alias_code_region_end{};
|
||||
VAddr m_address_space_start{};
|
||||
VAddr m_address_space_end{};
|
||||
VAddr m_heap_region_start{};
|
||||
VAddr m_heap_region_end{};
|
||||
VAddr m_current_heap_end{};
|
||||
VAddr m_alias_region_start{};
|
||||
VAddr m_alias_region_end{};
|
||||
VAddr m_stack_region_start{};
|
||||
VAddr m_stack_region_end{};
|
||||
VAddr m_kernel_map_region_start{};
|
||||
VAddr m_kernel_map_region_end{};
|
||||
VAddr m_code_region_start{};
|
||||
VAddr m_code_region_end{};
|
||||
VAddr m_alias_code_region_start{};
|
||||
VAddr m_alias_code_region_end{};
|
||||
|
||||
std::size_t mapped_physical_memory_size{};
|
||||
std::size_t max_heap_size{};
|
||||
std::size_t max_physical_memory_size{};
|
||||
std::size_t address_space_width{};
|
||||
size_t m_mapped_physical_memory_size{};
|
||||
size_t m_max_heap_size{};
|
||||
size_t m_max_physical_memory_size{};
|
||||
size_t m_address_space_width{};
|
||||
|
||||
bool is_kernel{};
|
||||
bool is_aslr_enabled{};
|
||||
KMemoryBlockManager m_memory_block_manager;
|
||||
|
||||
u32 heap_fill_value{};
|
||||
const KMemoryRegion* cached_physical_heap_region{};
|
||||
bool m_is_kernel{};
|
||||
bool m_enable_aslr{};
|
||||
bool m_enable_device_address_space_merge{};
|
||||
|
||||
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
|
||||
KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
|
||||
KMemoryBlockSlabManager* m_memory_block_slab_manager{};
|
||||
|
||||
Common::PageTable page_table_impl;
|
||||
u32 m_heap_fill_value{};
|
||||
const KMemoryRegion* m_cached_physical_heap_region{};
|
||||
|
||||
Core::System& system;
|
||||
KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
|
||||
KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
|
||||
|
||||
std::unique_ptr<Common::PageTable> m_page_table_impl;
|
||||
|
||||
Core::System& m_system;
|
||||
};
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
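The page-table hunks above (and most hunks that follow) replace plain `return` statements with the kernel's result macros (`R_RETURN`, `R_SUCCEED`, `R_UNLESS`, `R_SUCCEED_IF`). As a rough mental model, the macros behave like the minimal sketch below; this is a simplification for reading the diff, not yuzu's actual definitions, which live in `core/hle/result.h` and carry additional bookkeeping. `Result` and `ResultSuccess` are assumed to be in scope.

```cpp
// Minimal sketch of the result-macro idiom (assumption: simplified; the real
// yuzu macros add debug hooks and guard handling on top of this).
#define R_SUCCEED() return ResultSuccess
#define R_RETURN(expr) return (expr)
#define R_UNLESS(cond, res)  \
    do {                     \
        if (!(cond)) {       \
            return (res);    \
        }                    \
    } while (0)
#define R_SUCCEED_IF(cond)        \
    do {                          \
        if (cond) {               \
            return ResultSuccess; \
        }                         \
    } while (0)
```

Routing every exit through one macro family gives each early return a single place to hook logging or assertions, which is why the diff converts even trivial `return ResultSuccess;` lines.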
@@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string

process->name = std::move(process_name);
process->resource_limit = res_limit;
process->status = ProcessStatus::Created;
process->system_resource_address = 0;
process->state = State::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
: kernel.CreateNewUserProcessID();
@@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
process->exception_thread = nullptr;
process->is_suspended = false;
process->schedule_count = 0;
process->is_handle_table_initialized = false;

// Open a reference to the resource limit.
process->resource_limit->Open();

return ResultSuccess;
R_SUCCEED();
}

void KProcess::DoWorkerTaskImpl() {
@@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
}
}

u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
u64 KProcess::GetTotalPhysicalMemoryAvailable() {
const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
main_thread_stack_size};
if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
capacity != pool_size) {
@@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
return memory_usage_capacity;
}

u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
}

u64 KProcess::GetTotalPhysicalMemoryUsed() const {
return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
u64 KProcess::GetTotalPhysicalMemoryUsed() {
return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
GetSystemResourceSize();
}

u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}

@@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
shmem->Open();
shemen_info->Open();

return ResultSuccess;
R_SUCCEED();
}

void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -289,12 +291,12 @@ Result KProcess::Reset() {
KScopedSchedulerLock sl{kernel};

// Validate that we're in a state that we can reset.
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
R_UNLESS(state != State::Terminated, ResultInvalidState);
R_UNLESS(is_signaled, ResultInvalidState);

// Clear signaled.
is_signaled = false;
return ResultSuccess;
R_SUCCEED();
}

Result KProcess::SetActivity(ProcessActivity activity) {
@@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) {
KScopedSchedulerLock sl{kernel};

// Validate our state.
R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
R_UNLESS(state != State::Terminating, ResultInvalidState);
R_UNLESS(state != State::Terminated, ResultInvalidState);

// Either pause or resume.
if (activity == ProcessActivity::Paused) {
// Verify that we're not suspended.
if (is_suspended) {
return ResultInvalidState;
}
R_UNLESS(!is_suspended, ResultInvalidState);

// Suspend all threads.
for (auto* thread : GetThreadList()) {
@@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
ASSERT(activity == ProcessActivity::Runnable);

// Verify that we're suspended.
if (!is_suspended) {
return ResultInvalidState;
}
R_UNLESS(is_suspended, ResultInvalidState);

// Resume all threads.
for (auto* thread : GetThreadList()) {
@@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
SetSuspended(false);
}

return ResultSuccess;
R_SUCCEED();
}

Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
system_resource_size = metadata.GetSystemResourceSize();
image_size = code_size;

// We currently do not support process-specific system resource
UNIMPLEMENTED_IF(system_resource_size != 0);

KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
code_size + system_resource_size);
if (!memory_reservation.Succeeded()) {
LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
code_size + system_resource_size);
return ResultLimitReached;
R_RETURN(ResultLimitReached);
}
// Initialize proces address space
if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
0x8000000, code_size,
KMemoryManager::Pool::Application)};
if (const Result result{page_table.InitializeForProcess(
metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
&kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
result.IsError()) {
return result;
R_RETURN(result);
}

// Map process code region
if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
code_size / PageSize, KMemoryState::Code,
KMemoryPermission::None)};
if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
code_size / PageSize, KMemoryState::Code,
KMemoryPermission::None)};
result.IsError()) {
return result;
R_RETURN(result);
}

// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
if (const Result result{
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
result.IsError()) {
return result;
R_RETURN(result);
}

// Set memory usage capacity
@@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is36Bit:
case FileSys::ProgramAddressSpaceType::Is39Bit:
memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
break;

case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
break;

default:
@@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
}

// Create TLS region
R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
memory_reservation.Commit();

return handle_table.Initialize(capabilities.GetHandleTableSize());
R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
}

void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
@@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);

const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());

ChangeStatus(ProcessStatus::Running);
ChangeState(State::Running);

SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
}

void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
ChangeState(State::Terminating);

const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
for (auto* thread : in_thread_list) {
@@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() {

stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());

this->DeleteThreadLocalRegion(tls_region_address);
tls_region_address = 0;
this->DeleteThreadLocalRegion(plr_address);
plr_address = 0;

if (resource_limit) {
resource_limit->Release(LimitableResource::PhysicalMemory,
main_thread_stack_size + image_size);
}

ChangeStatus(ProcessStatus::Exited);
ChangeState(State::Terminated);
}

void KProcess::Finalize() {
@@ -474,7 +475,7 @@ void KProcess::Finalize() {
}

// Finalize the page table.
page_table.reset();
page_table.Finalize();

// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
}

*out = tlr;
return ResultSuccess;
R_SUCCEED();
}
}

@@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
// We succeeded!
tlp_guard.Cancel();
*out = tlr;
return ResultSuccess;
R_SUCCEED();
}

Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
KThreadLocalPage::Free(kernel, page_to_free);
}

return ResultSuccess;
R_SUCCEED();
}

bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Svc::MemoryPermission permission) {
page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
};

kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const {
}

KProcess::KProcess(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
kernel_.System())},
: KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
state_lock{kernel_}, list_lock{kernel_} {}

KProcess::~KProcess() = default;

void KProcess::ChangeStatus(ProcessStatus new_status) {
if (status == new_status) {
void KProcess::ChangeState(State new_state) {
if (state == new_state) {
return;
}

status = new_status;
state = new_state;
is_signaled = true;
NotifyAvailable();
}
@@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
// The kernel always ensures that the given stack size is page aligned.
main_thread_stack_size = Common::AlignUp(stack_size, PageSize);

const VAddr start{page_table->GetStackRegionStart()};
const std::size_t size{page_table->GetStackRegionEnd() - start};
const VAddr start{page_table.GetStackRegionStart()};
const std::size_t size{page_table.GetStackRegionEnd() - start};

CASCADE_RESULT(main_thread_stack_top,
page_table->AllocateAndMapMemory(
page_table.AllocateAndMapMemory(
main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
KMemoryState::Stack, KMemoryPermission::UserReadWrite));

main_thread_stack_top += main_thread_stack_size;

return ResultSuccess;
R_SUCCEED();
}

} // namespace Kernel

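`LoadFromMetadata` above leans on a reserve-then-commit pattern: `KScopedResourceReservation` takes the memory reservation up front, every fallible step runs, and only the success path calls `Commit()`; if any step returns early, the reservation is rolled back automatically. The sketch below shows the general shape of such a guard under assumed, simplified interfaces; it is illustrative only and is not yuzu's actual `KScopedResourceReservation`.

```cpp
#include <cstdint>

// Stand-in for the resource-limit interface; only Reserve/Release matter here.
struct Limit {
    virtual bool Reserve(uint64_t amount) = 0;
    virtual void Release(uint64_t amount) = 0;
    virtual ~Limit() = default;
};

// Reserve in the constructor, roll back in the destructor unless committed.
class ScopedReservation {
public:
    ScopedReservation(Limit& limit, uint64_t amount)
        : m_limit(limit), m_amount(amount), m_succeeded(limit.Reserve(amount)) {}

    ~ScopedReservation() {
        if (m_succeeded && !m_committed) {
            m_limit.Release(m_amount); // failure path: undo the reservation
        }
    }

    bool Succeeded() const { return m_succeeded; }
    void Commit() { m_committed = true; } // success path: keep the reservation

private:
    Limit& m_limit;
    uint64_t m_amount;
    bool m_succeeded;
    bool m_committed{false};
};
```

The payoff is visible in the diff: every `R_RETURN(result)` inside `LoadFromMetadata` can bail out without any explicit cleanup, because the guard's destructor releases the reservation.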
@@ -13,6 +13,7 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_worker_task.h"
@@ -31,7 +32,6 @@ class ProgramMetadata;

namespace Kernel {

class KernelCore;
class KPageTable;
class KResourceLimit;
class KThread;
class KSharedMemoryInfo;
@@ -45,24 +45,6 @@ enum class MemoryRegion : u16 {
BASE = 3,
};

/**
 * Indicates the status of a Process instance.
 *
 * @note These match the values as used by kernel,
 * so new entries should only be added if RE
 * shows that a new value has been introduced.
 */
enum class ProcessStatus {
Created,
CreatedWithDebuggerAttached,
Running,
WaitingForDebuggerToAttach,
DebuggerAttached,
Exiting,
Exited,
DebugBreak,
};

enum class ProcessActivity : u32 {
Runnable,
Paused,
@@ -89,6 +71,17 @@ public:
explicit KProcess(KernelCore& kernel_);
~KProcess() override;

enum class State {
Created = static_cast<u32>(Svc::ProcessState::Created),
CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
Running = static_cast<u32>(Svc::ProcessState::Running),
Crashed = static_cast<u32>(Svc::ProcessState::Crashed),
RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached),
Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
};

enum : u64 {
/// Lowest allowed process ID for a kernel initial process.
InitialKIPIDMin = 1,
@@ -114,12 +107,12 @@ public:

/// Gets a reference to the process' page table.
KPageTable& PageTable() {
return *page_table;
return page_table;
}

/// Gets const a reference to the process' page table.
const KPageTable& PageTable() const {
return *page_table;
return page_table;
}

/// Gets a reference to the process' handle table.
@@ -145,26 +138,25 @@ public:
}

Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
return condition_var.Wait(address, cv_key, tag, ns);
R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
}

Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
return address_arbiter.SignalToAddress(address, signal_type, value, count);
R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
}

Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}

/// Gets the address to the process' dedicated TLS region.
VAddr GetTLSRegionAddress() const {
return tls_region_address;
VAddr GetProcessLocalRegionAddress() const {
return plr_address;
}

/// Gets the current status of the process
ProcessStatus GetStatus() const {
return status;
State GetState() const {
return state;
}

/// Gets the unique ID that identifies this particular process.
@@ -286,18 +278,18 @@ public:
}

/// Retrieves the total physical memory available to this process in bytes.
u64 GetTotalPhysicalMemoryAvailable() const;
u64 GetTotalPhysicalMemoryAvailable();

/// Retrieves the total physical memory available to this process in bytes,
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();

/// Retrieves the total physical memory used by this process in bytes.
u64 GetTotalPhysicalMemoryUsed() const;
u64 GetTotalPhysicalMemoryUsed();

/// Retrieves the total physical memory used by this process in bytes,
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();

/// Gets the list of all threads created with this process as their owner.
std::list<KThread*>& GetThreadList() {
@@ -415,19 +407,24 @@ private:
pinned_threads[core_id] = nullptr;
}

/// Changes the process status. If the status is different
/// from the current process status, then this will trigger
/// a process signal.
void ChangeStatus(ProcessStatus new_status);
void FinalizeHandleTable() {
// Finalize the table.
handle_table.Finalize();

// Note that the table is finalized.
is_handle_table_initialized = false;
}

void ChangeState(State new_state);

/// Allocates the main thread stack for the process, given the stack size in bytes.
Result AllocateMainThreadStack(std::size_t stack_size);

/// Memory manager for this process
std::unique_ptr<KPageTable> page_table;
KPageTable page_table;

/// Current status of the process
ProcessStatus status{};
State state{};

/// The ID of this process
u64 process_id = 0;
@@ -443,6 +440,8 @@ private:
/// Resource limit descriptor for this process
KResourceLimit* resource_limit{};

VAddr system_resource_address{};

/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;

@@ -469,7 +468,7 @@ private:
KConditionVariable condition_var;

/// Address indicating the location of the process' dedicated TLS region.
VAddr tls_region_address = 0;
VAddr plr_address = 0;

/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@@ -495,8 +494,12 @@ private:
/// Schedule count of this process
s64 schedule_count{};

size_t memory_release_hint{};

bool is_signaled{};
bool is_suspended{};
bool is_immortal{};
bool is_handle_table_initialized{};
bool is_initialized{};

std::atomic<u16> num_running_threads{};

@@ -81,8 +81,8 @@ void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
// HACK: we cannot schedule from this thread, it is not a core thread
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);

// Special case to ensure dummy threads that are waiting block
GetCurrentThread(kernel).IfDummyThreadTryWait();
// Ensure dummy threads that are waiting block.
GetCurrentThread(kernel).DummyThreadBeginWait();

ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
GetCurrentThread(kernel).EnableDispatch();
@@ -314,6 +314,16 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
idle_cores &= ~(1ULL << core_id);
}

// HACK: any waiting dummy threads can wake up now.
kernel.GlobalSchedulerContext().WakeupWaitingDummyThreads();

// HACK: if we are a dummy thread, and we need to go sleep, indicate
// that for when the lock is released.
KThread* const cur_thread = GetCurrentThreadPointer(kernel);
if (cur_thread->IsDummyThread() && cur_thread->GetState() != ThreadState::Runnable) {
cur_thread->RequestDummyThreadWait();
}

return cores_needing_scheduling;
}

@@ -531,11 +541,23 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa
GetPriorityQueue(kernel).Remove(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);

if (thread->IsDummyThread()) {
// HACK: if this is a dummy thread, it should no longer wake up when the
// scheduler lock is released.
kernel.GlobalSchedulerContext().UnregisterDummyThreadForWakeup(thread);
}
} else if (cur_state == ThreadState::Runnable) {
// If we're now runnable, then we weren't previously, and we should add.
GetPriorityQueue(kernel).PushBack(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);

if (thread->IsDummyThread()) {
// HACK: if this is a dummy thread, it should wake up when the scheduler
// lock is released.
kernel.GlobalSchedulerContext().RegisterDummyThreadForWakeup(thread);
}
}
}

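The scheduler hunks above route dummy-thread sleep and wake through the global scheduler context: state changes register or unregister the thread, and `WakeupWaitingDummyThreads()` releases anything pending once the scheduler lock drops. The implementation of that bookkeeping is not part of this diff; the sketch below shows one plausible, deliberately generic shape (a locked pending-set drained on wake), mirroring the register/unregister/wake-all call pattern visible in the hunks.

```cpp
#include <mutex>
#include <set>

// Stand-in for the thread type; only the wait-release hook matters here.
struct DummyThread {
    virtual void EndWait() = 0;
    virtual ~DummyThread() = default;
};

// Hypothetical bookkeeping in the spirit of Register/UnregisterDummyThreadForWakeup
// and WakeupWaitingDummyThreads; not the actual GlobalSchedulerContext code.
class WakeupRegistry {
public:
    void Register(DummyThread* t) {
        std::scoped_lock lk{m_lock};
        m_pending.insert(t);
    }
    void Unregister(DummyThread* t) {
        std::scoped_lock lk{m_lock};
        m_pending.erase(t);
    }

    // Called once the scheduler lock is released: wake everything pending.
    void WakeAll() {
        std::scoped_lock lk{m_lock};
        for (auto* t : m_pending) {
            t->EndWait();
        }
        m_pending.clear();
    }

private:
    std::mutex m_lock;
    std::set<DummyThread*> m_pending;
};
```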
@@ -22,15 +22,12 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/service_thread.h"
#include "core/memory.h"

namespace Kernel {

using ThreadQueueImplForKServerSessionRequest = KThreadQueue;

static constexpr u32 MessageBufferSize = 0x100;

KServerSession::KServerSession(KernelCore& kernel_)
: KSynchronizationObject{kernel_}, m_lock{kernel_} {}

@@ -73,59 +70,7 @@ bool KServerSession::IsSignaled() const {
}

// Otherwise, we're signaled if we have a request and aren't handling one.
return !m_thread_request_list.empty() && m_current_thread_request == nullptr;
}

void KServerSession::AppendDomainHandler(SessionRequestHandlerPtr handler) {
manager->AppendDomainHandler(std::move(handler));
}

std::size_t KServerSession::NumDomainRequestHandlers() const {
return manager->DomainHandlerCount();
}

Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
if (!context.HasDomainMessageHeader()) {
return ResultSuccess;
}

// Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
context.SetSessionRequestManager(manager);

// If there is a DomainMessageHeader, then this is CommandType "Request"
const auto& domain_message_header = context.GetDomainMessageHeader();
const u32 object_id{domain_message_header.object_id};
switch (domain_message_header.command) {
case IPC::DomainMessageHeader::CommandType::SendMessage:
if (object_id > manager->DomainHandlerCount()) {
LOG_CRITICAL(IPC,
"object_id {} is too big! This probably means a recent service call "
"to {} needed to return a new interface!",
object_id, name);
ASSERT(false);
return ResultSuccess; // Ignore error if asserts are off
}
if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
return strong_ptr->HandleSyncRequest(*this, context);
} else {
ASSERT(false);
return ResultSuccess;
}

case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);

manager->CloseDomainHandler(object_id - 1);

IPC::ResponseBuilder rb{context, 2};
rb.Push(ResultSuccess);
return ResultSuccess;
}
}

LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
ASSERT(false);
return ResultSuccess;
return !m_request_list.empty() && m_current_request == nullptr;
}

Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
@@ -134,43 +79,11 @@ Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& m

context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);

// Ensure we have a session request handler
if (manager->HasSessionRequestHandler(*context)) {
if (auto strong_ptr = manager->GetServiceThread().lock()) {
strong_ptr->QueueSyncRequest(*parent, std::move(context));
} else {
ASSERT_MSG(false, "strong_ptr is nullptr!");
}
} else {
ASSERT_MSG(false, "handler is invalid!");
}

return ResultSuccess;
return manager->QueueSyncRequest(parent, std::move(context));
}

Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
Result result = ResultSuccess;

// If the session has been converted to a domain, handle the domain request
if (manager->HasSessionRequestHandler(context)) {
if (IsDomain() && context.HasDomainMessageHeader()) {
result = HandleDomainSyncRequest(context);
// If there is no domain header, the regular session handler is used
} else if (manager->HasSessionHandler()) {
// If this ServerSession has an associated HLE handler, forward the request to it.
result = manager->SessionHandler().HandleSyncRequest(*this, context);
}
} else {
ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
IPC::ResponseBuilder rb(context, 2);
rb.Push(ResultSuccess);
}

if (convert_to_domain) {
ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
manager->ConvertToDomain();
convert_to_domain = false;
}
Result result = manager->CompleteSyncRequest(this, context);

// The calling thread is waiting for this request to complete, so wake it up.
context.GetThread().EndWait(result);
@@ -178,7 +91,7 @@ Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
return result;
}

Result KServerSession::OnRequest() {
Result KServerSession::OnRequest(KSessionRequest* request) {
// Create the wait queue.
ThreadQueueImplForKServerSessionRequest wait_queue{kernel};

@@ -198,14 +111,13 @@ Result KServerSession::OnRequest() {
this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory);
} else {
// Non-HLE request.
auto* thread{GetCurrentThreadPointer(kernel)};

// Get whether we're empty.
const bool was_empty = m_thread_request_list.empty();
const bool was_empty = m_request_list.empty();

// Add the thread to the list.
thread->Open();
m_thread_request_list.push_back(thread);
// Add the request to the list.
request->Open();
m_request_list.push_back(*request);

// If we were empty, signal.
if (was_empty) {
@@ -213,6 +125,9 @@ Result KServerSession::OnRequest() {
}
}

// If we have a request event, this is asynchronous, and we don't need to wait.
R_SUCCEED_IF(request->GetEvent() != nullptr);

// This is a synchronous request, so we should wait for our request to complete.
GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
GetCurrentThread(kernel).BeginWait(&wait_queue);
@@ -223,32 +138,32 @@ Result KServerSession::OnRequest() {

Result KServerSession::SendReply() {
// Lock the session.
KScopedLightLock lk(m_lock);
KScopedLightLock lk{m_lock};

// Get the request.
KThread* client_thread;
KSessionRequest* request;
{
KScopedSchedulerLock sl{kernel};

// Get the current request.
client_thread = m_current_thread_request;
R_UNLESS(client_thread != nullptr, ResultInvalidState);
request = m_current_request;
R_UNLESS(request != nullptr, ResultInvalidState);

// Clear the current request, since we're processing it.
m_current_thread_request = nullptr;
if (!m_thread_request_list.empty()) {
m_current_request = nullptr;
if (!m_request_list.empty()) {
this->NotifyAvailable();
}
}

// Close reference to the request once we're done processing it.
SCOPE_EXIT({ client_thread->Close(); });
SCOPE_EXIT({ request->Close(); });

// Extract relevant information from the request.
// const uintptr_t client_message = request->GetAddress();
// const size_t client_buffer_size = request->GetSize();
// KThread *client_thread = request->GetThread();
// KEvent *event = request->GetEvent();
const uintptr_t client_message = request->GetAddress();
const size_t client_buffer_size = request->GetSize();
KThread* client_thread = request->GetThread();
KEvent* event = request->GetEvent();

// Check whether we're closed.
const bool closed = (client_thread == nullptr || parent->IsClientClosed());
@@ -261,8 +176,8 @@ Result KServerSession::SendReply() {
UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());

auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
auto* dst_msg_buffer = memory.GetPointer(client_thread->GetTLSAddress());
std::memcpy(dst_msg_buffer, src_msg_buffer, MessageBufferSize);
auto* dst_msg_buffer = memory.GetPointer(client_message);
std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
} else {
result = ResultSessionClosed;
}
@@ -278,11 +193,30 @@ Result KServerSession::SendReply() {

// If there's a client thread, update it.
if (client_thread != nullptr) {
// End the client thread's wait.
KScopedSchedulerLock sl{kernel};
if (event != nullptr) {
// // Get the client process/page table.
// KProcess *client_process = client_thread->GetOwnerProcess();
// KPageTable *client_page_table = &client_process->PageTable();

if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(client_result);
// // If we need to, reply with an async error.
// if (R_FAILED(client_result)) {
// ReplyAsyncError(client_process, client_message, client_buffer_size,
// client_result);
// }

// // Unlock the client buffer.
// // NOTE: Nintendo does not check the result of this.
// client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);

// Signal the event.
event->Signal();
} else {
// End the client thread's wait.
KScopedSchedulerLock sl{kernel};

if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(client_result);
}
}
}

@@ -291,10 +225,10 @@ Result KServerSession::SendReply() {

Result KServerSession::ReceiveRequest() {
// Lock the session.
KScopedLightLock lk(m_lock);
KScopedLightLock lk{m_lock};

// Get the request and client thread.
// KSessionRequest *request;
KSessionRequest* request;
KThread* client_thread;

{
@@ -304,35 +238,41 @@ Result KServerSession::ReceiveRequest() {
R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed);

// Ensure we aren't already servicing a request.
R_UNLESS(m_current_thread_request == nullptr, ResultNotFound);
R_UNLESS(m_current_request == nullptr, ResultNotFound);

// Ensure we have a request to service.
R_UNLESS(!m_thread_request_list.empty(), ResultNotFound);
R_UNLESS(!m_request_list.empty(), ResultNotFound);

// Pop the first request from the list.
client_thread = m_thread_request_list.front();
m_thread_request_list.pop_front();
request = &m_request_list.front();
m_request_list.pop_front();

// Get the thread for the request.
client_thread = request->GetThread();
R_UNLESS(client_thread != nullptr, ResultSessionClosed);

// Open the client thread.
client_thread->Open();
}

// SCOPE_EXIT({ client_thread->Close(); });
SCOPE_EXIT({ client_thread->Close(); });

// Set the request as our current.
m_current_thread_request = client_thread;
m_current_request = request;

// Get the client address.
uintptr_t client_message = request->GetAddress();
size_t client_buffer_size = request->GetSize();
// bool recv_list_broken = false;

// Receive the message.
Core::Memory::Memory& memory{kernel.System().Memory()};
KThread* server_thread{GetCurrentThreadPointer(kernel)};
UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());

auto* src_msg_buffer = memory.GetPointer(client_thread->GetTLSAddress());
auto* src_msg_buffer = memory.GetPointer(client_message);
auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
std::memcpy(dst_msg_buffer, src_msg_buffer, MessageBufferSize);
std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);

// We succeeded.
return ResultSuccess;
@@ -344,35 +284,34 @@ void KServerSession::CleanupRequests() {
// Clean up any pending requests.
while (true) {
// Get the next request.
// KSessionRequest *request = nullptr;
KThread* client_thread = nullptr;
KSessionRequest* request = nullptr;
{
KScopedSchedulerLock sl{kernel};

if (m_current_thread_request) {
if (m_current_request) {
// Choose the current request if we have one.
client_thread = m_current_thread_request;
m_current_thread_request = nullptr;
} else if (!m_thread_request_list.empty()) {
request = m_current_request;
m_current_request = nullptr;
} else if (!m_request_list.empty()) {
// Pop the request from the front of the list.
client_thread = m_thread_request_list.front();
m_thread_request_list.pop_front();
request = &m_request_list.front();
m_request_list.pop_front();
}
}

// If there's no request, we're done.
if (client_thread == nullptr) {
if (request == nullptr) {
break;
}

// Close a reference to the request once it's cleaned up.
SCOPE_EXIT({ client_thread->Close(); });
SCOPE_EXIT({ request->Close(); });

// Extract relevant information from the request.
// const uintptr_t client_message = request->GetAddress();
// const size_t client_buffer_size = request->GetSize();
// KThread *client_thread = request->GetThread();
// KEvent *event = request->GetEvent();
KThread* client_thread = request->GetThread();
KEvent* event = request->GetEvent();

// KProcess *server_process = request->GetServerProcess();
// KProcess *client_process = (client_thread != nullptr) ?
@@ -385,11 +324,24 @@ void KServerSession::CleanupRequests() {

// If there's a client thread, update it.
if (client_thread != nullptr) {
// End the client thread's wait.
KScopedSchedulerLock sl{kernel};
if (event != nullptr) {
// // We need to reply async.
// ReplyAsyncError(client_process, client_message, client_buffer_size,
// (R_SUCCEEDED(result) ? ResultSessionClosed : result));

if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(ResultSessionClosed);
// // Unlock the client buffer.
// NOTE: Nintendo does not check the result of this.
// client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);

// Signal the event.
event->Signal();
} else {
// End the client thread's wait.
KScopedSchedulerLock sl{kernel};

if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(ResultSessionClosed);
}
}
}
}

@@ -12,6 +12,7 @@

#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_session_request.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"

@@ -57,44 +58,15 @@ public:
}

bool IsSignaled() const override;

void OnClientClosed();

void ClientConnected(SessionRequestHandlerPtr handler) {
if (manager) {
manager->SetSessionHandler(std::move(handler));
}
}

void ClientDisconnected() {
manager = nullptr;
}

/// Adds a new domain request handler to the collection of request handlers within
/// this ServerSession instance.
void AppendDomainHandler(SessionRequestHandlerPtr handler);

/// Retrieves the total number of domain request handlers that have been
/// appended to this ServerSession instance.
std::size_t NumDomainRequestHandlers() const;

/// Returns true if the session has been converted to a domain, otherwise False
bool IsDomain() const {
return manager && manager->IsDomain();
}

/// Converts the session to a domain at the end of the current command
void ConvertToDomain() {
convert_to_domain = true;
}

/// Gets the session request manager, which forwards requests to the underlying service
std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() {
return manager;
}

/// TODO: flesh these out to match the real kernel
Result OnRequest();
Result OnRequest(KSessionRequest* request);
Result SendReply();
Result ReceiveRequest();

@@ -108,10 +80,6 @@ private:
/// Completes a sync request from the emulated application.
Result CompleteSyncRequest(HLERequestContext& context);

/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
/// object handle.
Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);

/// This session's HLE request handlers; if nullptr, this is not an HLE server
std::shared_ptr<SessionRequestManager> manager;

@@ -122,9 +90,8 @@ private:
KSession* parent{};

/// List of threads which are pending a reply.
/// FIXME: KSessionRequest
std::list<KThread*> m_thread_request_list;
KThread* m_current_thread_request{};
boost::intrusive::list<KSessionRequest> m_request_list;
KSessionRequest* m_current_request;

KLightLock m_lock;
};

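The new `m_request_list` above is a `boost::intrusive::list`, so the session never allocates list nodes: each `KSessionRequest` carries its own link via `boost::intrusive::list_base_hook<>`. The following self-contained example demonstrates the same pattern in isolation; it is a generic illustration, not yuzu code.

```cpp
#include <boost/intrusive/list.hpp>
#include <cassert>

// Generic illustration of an intrusive FIFO like m_request_list.
class Request : public boost::intrusive::list_base_hook<> {
public:
    explicit Request(int id) : m_id(id) {}
    int Id() const {
        return m_id;
    }

private:
    int m_id;
};

int main() {
    Request a{1}, b{2};

    // The list links the objects themselves; no per-node heap allocation.
    boost::intrusive::list<Request> queue;
    queue.push_back(a);
    queue.push_back(b);

    // Pop in FIFO order, as ReceiveRequest() does with m_request_list.
    Request& front = queue.front();
    queue.pop_front();
    assert(front.Id() == 1);

    // Intrusive lists do not own their elements; unlink before they go away.
    queue.clear();
    return 0;
}
```

The design trade-off matches what the diff needs: requests are slab-allocated, reference-counted objects, so an owning container would be wrong, and an intrusive hook gives O(1) queueing with no extra allocation on the IPC hot path.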
61
src/core/hle/kernel/k_session_request.cpp
Normal file
@@ -0,0 +1,61 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_session_request.h"

namespace Kernel {

Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size,
KMemoryState state, size_t index) {
// At most 15 buffers of each type (4-bit descriptor counts).
ASSERT(index < ((1ul << 4) - 1) * 3);

// Get the mapping.
Mapping* mapping;
if (index < NumStaticMappings) {
mapping = &m_static_mappings[index];
} else {
// Allocate a page for the extra mappings.
if (m_mappings == nullptr) {
KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel);
R_UNLESS(page_buffer != nullptr, ResultOutOfMemory);

m_mappings = reinterpret_cast<Mapping*>(page_buffer);
}

mapping = &m_mappings[index - NumStaticMappings];
}

// Set the mapping.
mapping->Set(client, server, size, state);

return ResultSuccess;
}

Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size,
KMemoryState state) {
ASSERT(m_num_recv == 0);
ASSERT(m_num_exch == 0);
return this->PushMap(client, server, size, state, m_num_send++);
}

Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size,
KMemoryState state) {
ASSERT(m_num_exch == 0);
return this->PushMap(client, server, size, state, m_num_send + m_num_recv++);
}

Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size,
KMemoryState state) {
return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++);
}

void KSessionRequest::SessionMappings::Finalize() {
if (m_mappings) {
KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings));
m_mappings = nullptr;
}
}

} // namespace Kernel
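`PushMap` above stores all three descriptor kinds in one flat sequence, indexed sends first, then receives, then exchanges; the `ASSERT`s in `PushSend`/`PushReceive` enforce that pushes arrive in exactly that order, so each group stays contiguous. The toy model below isolates just that indexing arithmetic (illustrative only; names are hypothetical, not yuzu's):

```cpp
// Toy model of the flat send/recv/exch indexing used by SessionMappings.
// Pushing in order (sends, then receives, then exchanges) keeps each group
// contiguous, so entry i of a group lives at that group's base + i.
#include <cassert>
#include <cstddef>

struct Counts {
    std::size_t send{}, recv{}, exch{};

    std::size_t PushSend() {
        assert(recv == 0 && exch == 0); // sends must come first
        return send++;
    }
    std::size_t PushReceive() {
        assert(exch == 0); // receives before exchanges
        return send + recv++;
    }
    std::size_t PushExchange() {
        return send + recv + exch++;
    }

    std::size_t ReceiveIndex(std::size_t i) const { return send + i; }
    std::size_t ExchangeIndex(std::size_t i) const { return send + recv + i; }
};

int main() {
    Counts c;
    assert(c.PushSend() == 0);     // send #0 -> slot 0
    assert(c.PushSend() == 1);     // send #1 -> slot 1
    assert(c.PushReceive() == 2);  // recv #0 -> slot 2
    assert(c.PushExchange() == 3); // exch #0 -> slot 3
    assert(c.ReceiveIndex(0) == 2);
    return 0;
}
```

The first `NumStaticMappings` slots live inline in the object and the overflow spills into a single `KPageBuffer`, which keeps the common case (few descriptors) allocation-free.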
306
src/core/hle/kernel/k_session_request.h
Normal file
306
src/core/hle/kernel/k_session_request.h
Normal file
@@ -0,0 +1,306 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>

#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/slab_helpers.h"

namespace Kernel {

class KSessionRequest final : public KSlabAllocated<KSessionRequest>,
                              public KAutoObject,
                              public boost::intrusive::list_base_hook<> {
    KERNEL_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject);

public:
    class SessionMappings {
    private:
        static constexpr size_t NumStaticMappings = 8;

        class Mapping {
        public:
            constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) {
                m_client_address = c;
                m_server_address = s;
                m_size = sz;
                m_state = st;
            }

            constexpr VAddr GetClientAddress() const {
                return m_client_address;
            }
            constexpr VAddr GetServerAddress() const {
                return m_server_address;
            }
            constexpr size_t GetSize() const {
                return m_size;
            }
            constexpr KMemoryState GetMemoryState() const {
                return m_state;
            }

        private:
            VAddr m_client_address;
            VAddr m_server_address;
            size_t m_size;
            KMemoryState m_state;
        };

    public:
        explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {}

        void Initialize() {}
        void Finalize();

        size_t GetSendCount() const {
            return m_num_send;
        }
        size_t GetReceiveCount() const {
            return m_num_recv;
        }
        size_t GetExchangeCount() const {
            return m_num_exch;
        }

        Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state);
        Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state);
        Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state);

        VAddr GetSendClientAddress(size_t i) const {
            return GetSendMapping(i).GetClientAddress();
        }
        VAddr GetSendServerAddress(size_t i) const {
            return GetSendMapping(i).GetServerAddress();
        }
        size_t GetSendSize(size_t i) const {
            return GetSendMapping(i).GetSize();
        }
        KMemoryState GetSendMemoryState(size_t i) const {
            return GetSendMapping(i).GetMemoryState();
        }

        VAddr GetReceiveClientAddress(size_t i) const {
            return GetReceiveMapping(i).GetClientAddress();
        }
        VAddr GetReceiveServerAddress(size_t i) const {
            return GetReceiveMapping(i).GetServerAddress();
        }
        size_t GetReceiveSize(size_t i) const {
            return GetReceiveMapping(i).GetSize();
        }
        KMemoryState GetReceiveMemoryState(size_t i) const {
            return GetReceiveMapping(i).GetMemoryState();
        }

        VAddr GetExchangeClientAddress(size_t i) const {
            return GetExchangeMapping(i).GetClientAddress();
        }
        VAddr GetExchangeServerAddress(size_t i) const {
            return GetExchangeMapping(i).GetServerAddress();
        }
        size_t GetExchangeSize(size_t i) const {
            return GetExchangeMapping(i).GetSize();
        }
        KMemoryState GetExchangeMemoryState(size_t i) const {
            return GetExchangeMapping(i).GetMemoryState();
        }

    private:
        Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index);

        const Mapping& GetSendMapping(size_t i) const {
            ASSERT(i < m_num_send);

            const size_t index = i;
            if (index < NumStaticMappings) {
                return m_static_mappings[index];
            } else {
                return m_mappings[index - NumStaticMappings];
            }
        }

        const Mapping& GetReceiveMapping(size_t i) const {
            ASSERT(i < m_num_recv);

            const size_t index = m_num_send + i;
            if (index < NumStaticMappings) {
                return m_static_mappings[index];
            } else {
                return m_mappings[index - NumStaticMappings];
            }
        }

        const Mapping& GetExchangeMapping(size_t i) const {
            ASSERT(i < m_num_exch);

            const size_t index = m_num_send + m_num_recv + i;
            if (index < NumStaticMappings) {
                return m_static_mappings[index];
            } else {
                return m_mappings[index - NumStaticMappings];
            }
        }

    private:
        KernelCore& kernel;
        std::array<Mapping, NumStaticMappings> m_static_mappings;
        Mapping* m_mappings{};
        u8 m_num_send{};
        u8 m_num_recv{};
        u8 m_num_exch{};
    };

public:
    explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {}

    static KSessionRequest* Create(KernelCore& kernel) {
        KSessionRequest* req = KSessionRequest::Allocate(kernel);
        if (req != nullptr) [[likely]] {
            KAutoObject::Create(req);
        }
        return req;
    }

    void Destroy() override {
        this->Finalize();
        KSessionRequest::Free(kernel, this);
    }

    void Initialize(KEvent* event, uintptr_t address, size_t size) {
        m_mappings.Initialize();

        m_thread = GetCurrentThreadPointer(kernel);
        m_event = event;
        m_address = address;
        m_size = size;

        m_thread->Open();
        if (m_event != nullptr) {
            m_event->Open();
        }
    }

    static void PostDestroy(uintptr_t arg) {}

    KThread* GetThread() const {
        return m_thread;
    }
    KEvent* GetEvent() const {
        return m_event;
    }
    uintptr_t GetAddress() const {
        return m_address;
    }
    size_t GetSize() const {
        return m_size;
    }
    KProcess* GetServerProcess() const {
        return m_server;
    }

    void SetServerProcess(KProcess* process) {
        m_server = process;
        m_server->Open();
    }

    void ClearThread() {
        m_thread = nullptr;
    }
    void ClearEvent() {
        m_event = nullptr;
    }

    size_t GetSendCount() const {
        return m_mappings.GetSendCount();
    }
    size_t GetReceiveCount() const {
        return m_mappings.GetReceiveCount();
    }
    size_t GetExchangeCount() const {
        return m_mappings.GetExchangeCount();
    }

    Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) {
        return m_mappings.PushSend(client, server, size, state);
    }

    Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) {
        return m_mappings.PushReceive(client, server, size, state);
    }

    Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) {
        return m_mappings.PushExchange(client, server, size, state);
    }

    VAddr GetSendClientAddress(size_t i) const {
        return m_mappings.GetSendClientAddress(i);
    }
    VAddr GetSendServerAddress(size_t i) const {
        return m_mappings.GetSendServerAddress(i);
    }
    size_t GetSendSize(size_t i) const {
        return m_mappings.GetSendSize(i);
    }
    KMemoryState GetSendMemoryState(size_t i) const {
        return m_mappings.GetSendMemoryState(i);
    }

    VAddr GetReceiveClientAddress(size_t i) const {
        return m_mappings.GetReceiveClientAddress(i);
    }
    VAddr GetReceiveServerAddress(size_t i) const {
        return m_mappings.GetReceiveServerAddress(i);
    }
    size_t GetReceiveSize(size_t i) const {
        return m_mappings.GetReceiveSize(i);
    }
    KMemoryState GetReceiveMemoryState(size_t i) const {
        return m_mappings.GetReceiveMemoryState(i);
    }

    VAddr GetExchangeClientAddress(size_t i) const {
        return m_mappings.GetExchangeClientAddress(i);
    }
    VAddr GetExchangeServerAddress(size_t i) const {
        return m_mappings.GetExchangeServerAddress(i);
    }
    size_t GetExchangeSize(size_t i) const {
        return m_mappings.GetExchangeSize(i);
    }
    KMemoryState GetExchangeMemoryState(size_t i) const {
        return m_mappings.GetExchangeMemoryState(i);
    }

private:
    // NOTE: This is public and virtual in Nintendo's kernel.
    void Finalize() override {
        m_mappings.Finalize();

        if (m_thread) {
            m_thread->Close();
        }
        if (m_event) {
            m_event->Close();
        }
        if (m_server) {
            m_server->Close();
        }
    }

private:
    SessionMappings m_mappings;
    KThread* m_thread{};
    KProcess* m_server{};
    KEvent* m_event{};
    uintptr_t m_address{};
    size_t m_size{};
};

} // namespace Kernel
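A rough sketch of the intended lifecycle of the class above, pieced together from the header alone; kernel, event, server_process, message_address, and message_size are assumed to exist, and the snippet is illustrative rather than compilable outside the kernel:

    // Illustrative only: Open()/Close() are the KAutoObject reference-count
    // operations, so the request pins its thread/event/server process and
    // releases them again in Finalize() when the last reference goes away.
    KSessionRequest* request = KSessionRequest::Create(kernel);
    if (request != nullptr) {
        request->Initialize(event, message_address, message_size); // Opens thread and event
        request->SetServerProcess(server_process);                 // Opens the server process
        // ... queue the request on the session and service the IPC message ...
        request->Close(); // last reference runs Destroy() -> Finalize() -> Free()
    }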
@@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
    is_initialized = true;

    // Clear all pages in the memory.
    std::memset(device_memory_.GetPointer(physical_address_), 0, size_);
    std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);

    return ResultSuccess;
}

@@ -54,7 +54,7 @@ public:
     * @return A pointer to the shared memory block from the specified offset
     */
    u8* GetPointer(std::size_t offset = 0) {
        return device_memory->GetPointer(physical_address + offset);
        return device_memory->GetPointer<u8>(physical_address + offset);
    }

    /**
@@ -63,7 +63,7 @@ public:
     * @return A pointer to the shared memory block from the specified offset
     */
    const u8* GetPointer(std::size_t offset = 0) const {
        return device_memory->GetPointer(physical_address + offset);
        return device_memory->GetPointer<u8>(physical_address + offset);
    }

    void Finalize() override;

@@ -15,7 +15,8 @@ class KSharedMemoryInfo final : public KSlabAllocated<KSharedMemoryInfo>,
                                public boost::intrusive::list_base_hook<> {

public:
    explicit KSharedMemoryInfo() = default;
    explicit KSharedMemoryInfo(KernelCore&) {}
    KSharedMemoryInfo() = default;

    constexpr void Initialize(KSharedMemory* shmem) {
        shared_memory = shmem;
@@ -8,6 +8,7 @@
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/spin_lock.h"

namespace Kernel {

@@ -36,28 +37,34 @@ public:
    }

    void* Allocate() {
        Node* ret = m_head.load();
        // KScopedInterruptDisable di;

        do {
            if (ret == nullptr) {
                break;
            }
        } while (!m_head.compare_exchange_weak(ret, ret->next));
        m_lock.lock();

        Node* ret = m_head;
        if (ret != nullptr) [[likely]] {
            m_head = ret->next;
        }

        m_lock.unlock();
        return ret;
    }

    void Free(void* obj) {
        Node* node = static_cast<Node*>(obj);
        // KScopedInterruptDisable di;

        Node* cur_head = m_head.load();
        do {
            node->next = cur_head;
        } while (!m_head.compare_exchange_weak(cur_head, node));
        m_lock.lock();

        Node* node = static_cast<Node*>(obj);
        node->next = m_head;
        m_head = node;

        m_lock.unlock();
    }

private:
    std::atomic<Node*> m_head{};
    Common::SpinLock m_lock;
};

} // namespace impl
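For context, a self-contained reduction of the pattern the hunk above switches to: a spinlock-guarded singly linked free list instead of the previous lock-free CAS loops (my sketch; the real code uses Common::SpinLock):

    #include <atomic>

    // Minimal spinlock-guarded free list, mirroring the shape of the change
    // above. 'Node' is assumed to occupy the first bytes of every freed object.
    struct Node { Node* next; };

    class FreeList {
    public:
        void* Allocate() {
            lock();
            Node* ret = m_head;  // pop the head, if any
            if (ret != nullptr) {
                m_head = ret->next;
            }
            unlock();
            return ret;
        }

        void Free(void* obj) {
            lock();
            Node* node = static_cast<Node*>(obj);
            node->next = m_head; // push onto the head
            m_head = node;
            unlock();
        }

    private:
        void lock() {
            while (m_locked.exchange(true, std::memory_order_acquire)) {}
        }
        void unlock() { m_locked.store(false, std::memory_order_release); }

        Node* m_head{};
        std::atomic<bool> m_locked{false};
    };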
@@ -30,6 +30,7 @@
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
#include "core/memory.h"

@@ -38,6 +39,9 @@
#endif

namespace {

constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;

static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                 u32 entry_point, u32 arg) {
    context = {};
@@ -144,7 +148,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
    physical_affinity_mask.SetAffinity(phys_core, true);

    // Set the thread state.
    thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized;
    thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
                       ? ThreadState::Runnable
                       : ThreadState::Initialized;

    // Set TLS address.
    tls_address = 0;
@@ -241,7 +247,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
        }
    }

    return ResultSuccess;
    R_SUCCEED();
}

Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -254,7 +260,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
    thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
    thread->is_single_core = !Settings::values.use_multi_core.GetValue();

    return ResultSuccess;
    R_SUCCEED();
}

Result KThread::InitializeDummyThread(KThread* thread) {
@@ -264,31 +270,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
    // Initialize emulation parameters.
    thread->stack_parameters.disable_count = 0;

    return ResultSuccess;
    R_SUCCEED();
}

Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
                            system.GetCpuManager().GetGuestActivateFunc());
    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
                              ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
}

Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
                            system.GetCpuManager().GetIdleThreadStartFunc());
    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
                              ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
}

Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                             KThreadFunction func, uintptr_t arg, s32 virt_core) {
    return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
                            system.GetCpuManager().GetShutdownThreadStartFunc());
    R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
                              ThreadType::HighPriority,
                              system.GetCpuManager().GetShutdownThreadStartFunc()));
}

Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
                                     uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
                                     KProcess* owner) {
    system.Kernel().GlobalSchedulerContext().AddThread(thread);
    return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
                            ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
    R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
                              ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
}

void KThread::PostDestroy(uintptr_t arg) {
@@ -538,7 +545,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
    *out_ideal_core = virtual_ideal_core_id;
    *out_affinity_mask = virtual_affinity_mask;

    return ResultSuccess;
    R_SUCCEED();
}

Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -554,7 +561,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
        *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
    }

    return ResultSuccess;
    R_SUCCEED();
}

Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -666,7 +673,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
        } while (retry_update);
    }

    return ResultSuccess;
    R_SUCCEED();
}

void KThread::SetBasePriority(s32 value) {
@@ -839,7 +846,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
        } while (thread_is_current);
    }

    return ResultSuccess;
    R_SUCCEED();
}

Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -874,7 +881,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
        }
    }

    return ResultSuccess;
    R_SUCCEED();
}

void KThread::AddWaiterImpl(KThread* thread) {
@@ -1038,7 +1045,7 @@ Result KThread::Run() {
        // Set our state and finish.
        SetState(ThreadState::Runnable);

        return ResultSuccess;
        R_SUCCEED();
    }
}

@@ -1073,6 +1080,78 @@ void KThread::Exit() {
    UNREACHABLE_MSG("KThread::Exit() would return");
}

Result KThread::Terminate() {
    ASSERT(this != GetCurrentThreadPointer(kernel));

    // Request the thread terminate if it hasn't already.
    if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
        // If the thread isn't terminated, wait for it to terminate.
        s32 index;
        KSynchronizationObject* objects[] = {this};
        R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
                                           Svc::WaitInfinite));
    }

    R_SUCCEED();
}

ThreadState KThread::RequestTerminate() {
    ASSERT(this != GetCurrentThreadPointer(kernel));

    KScopedSchedulerLock sl{kernel};

    // Determine if this is the first termination request.
    const bool first_request = [&]() -> bool {
        // Perform an atomic compare-and-swap from false to true.
        bool expected = false;
        return termination_requested.compare_exchange_strong(expected, true);
    }();

    // If this is the first request, start termination procedure.
    if (first_request) {
        // If the thread is in initialized state, just change state to terminated.
        if (this->GetState() == ThreadState::Initialized) {
            thread_state = ThreadState::Terminated;
            return ThreadState::Terminated;
        }

        // Register the terminating dpc.
        this->RegisterDpc(DpcFlag::Terminating);

        // If the thread is pinned, unpin it.
        if (this->GetStackParameters().is_pinned) {
            this->GetOwnerProcess()->UnpinThread(this);
        }

        // If the thread is suspended, continue it.
        if (this->IsSuspended()) {
            suspend_allowed_flags = 0;
            this->UpdateState();
        }

        // Change the thread's priority to be higher than any system thread's.
        if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
            this->SetBasePriority(TerminatingThreadPriority);
        }

        // If the thread is runnable, send a termination interrupt to other cores.
        if (this->GetState() == ThreadState::Runnable) {
            if (const u64 core_mask =
                    physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
                core_mask != 0) {
                Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
            }
        }

        // Wake up the thread.
        if (this->GetState() == ThreadState::Waiting) {
            wait_queue->CancelWait(this, ResultTerminationRequested, true);
        }
    }

    return this->GetState();
}
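An aside on RequestTerminate above: the first_request latch is a standard atomic once-flag. Reduced to its essentials:

    #include <atomic>

    // Returns true exactly once across all callers, no matter how many threads
    // race on it; later callers see the flag already true and fail the CAS.
    bool ClaimFirstTerminationRequest(std::atomic<bool>& termination_requested) {
        bool expected = false;
        return termination_requested.compare_exchange_strong(expected, true);
    }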
Result KThread::Sleep(s64 timeout) {
    ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
    ASSERT(this == GetCurrentThreadPointer(kernel));
@@ -1086,7 +1165,7 @@ Result KThread::Sleep(s64 timeout) {
        // Check if the thread should terminate.
        if (this->IsTerminationRequested()) {
            slp.CancelSleep();
            return ResultTerminationRequested;
            R_THROW(ResultTerminationRequested);
        }

        // Wait for the sleep to end.
@@ -1094,33 +1173,32 @@ Result KThread::Sleep(s64 timeout) {
        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
    }

    return ResultSuccess;
    R_SUCCEED();
}

void KThread::IfDummyThreadTryWait() {
    if (!IsDummyThread()) {
        return;
    }
void KThread::RequestDummyThreadWait() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
    ASSERT(this->IsDummyThread());

    if (GetState() != ThreadState::Waiting) {
        return;
    }
    // We will block when the scheduler lock is released.
    dummy_thread_runnable.store(false);
}

void KThread::DummyThreadBeginWait() {
    ASSERT(this->IsDummyThread());
    ASSERT(!kernel.IsPhantomModeForSingleCore());

    // Block until we are no longer waiting.
    std::unique_lock lk(dummy_wait_lock);
    dummy_wait_cv.wait(
        lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
    // Block until runnable is no longer false.
    dummy_thread_runnable.wait(false);
}

void KThread::IfDummyThreadEndWait() {
    if (!IsDummyThread()) {
        return;
    }
void KThread::DummyThreadEndWait() {
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
    ASSERT(this->IsDummyThread());

    // Wake up the waiting thread.
    dummy_wait_cv.notify_one();
    dummy_thread_runnable.store(true);
    dummy_thread_runnable.notify_one();
}

void KThread::BeginWait(KThreadQueue* queue) {
@@ -1154,9 +1232,6 @@ void KThread::EndWait(Result wait_result_) {
        }

        wait_queue->EndWait(this, wait_result_);

        // Special case for dummy threads to wakeup if necessary.
        IfDummyThreadEndWait();
    }
}
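Side note on the dummy-thread rewrite above: it replaces the mutex/condition-variable pair with C++20 std::atomic wait/notify. The core park/unpark pattern, in isolation:

    #include <atomic>

    // C++20 atomic wait/notify, mirroring the three functions above: no mutex
    // or condition variable, just a flag the waiter blocks on while it is false.
    std::atomic<bool> runnable{true};

    void RequestWait() {      // cf. RequestDummyThreadWait (under scheduler lock)
        runnable.store(false);
    }
    void BeginWait() {        // cf. DummyThreadBeginWait (blocks the host thread)
        runnable.wait(false); // returns once another thread stores true and notifies
    }
    void EndWait() {          // cf. DummyThreadEndWait
        runnable.store(true);
        runnable.notify_one();
    }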
@@ -180,6 +180,10 @@ public:

    void Exit();

    Result Terminate();

    ThreadState RequestTerminate();

    [[nodiscard]] u32 GetSuspendFlags() const {
        return suspend_allowed_flags & suspend_request_flags;
    }
@@ -639,8 +643,9 @@ public:
    // therefore will not block on guest kernel synchronization primitives. These methods handle
    // blocking as needed.

    void IfDummyThreadTryWait();
    void IfDummyThreadEndWait();
    void RequestDummyThreadWait();
    void DummyThreadBeginWait();
    void DummyThreadEndWait();

    [[nodiscard]] uintptr_t GetArgument() const {
        return argument;
@@ -773,8 +778,7 @@ private:
    bool is_single_core{};
    ThreadType thread_type{};
    StepState step_state{};
    std::mutex dummy_wait_lock;
    std::condition_variable dummy_wait_cv;
    std::atomic<bool> dummy_thread_runnable{true};

    // For debugging
    std::vector<KSynchronizationObject*> wait_objects_for_debugging;

@@ -26,7 +26,7 @@ public:
    static_assert(RegionsPerPage > 0);

public:
    constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) {
    constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) {
        m_is_region_free.fill(true);
    }
@@ -24,6 +24,7 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
@@ -47,8 +48,8 @@ namespace Kernel {

struct KernelCore::Impl {
    explicit Impl(Core::System& system_, KernelCore& kernel_)
        : time_manager{system_},
          service_threads_manager{1, "ServiceThreadsManager"}, system{system_} {}
        : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"},
          service_thread_barrier{2}, system{system_} {}

    void SetMulticore(bool is_multi) {
        is_multicore = is_multi;
@@ -73,8 +74,16 @@ struct KernelCore::Impl {
        InitializeMemoryLayout();
        Init::InitializeKPageBufferSlabHeap(system);
        InitializeShutdownThreads();
        InitializePreemption(kernel);
        InitializePhysicalCores();
        InitializePreemption(kernel);

        // Initialize the Dynamic Slab Heaps.
        {
            const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
            ASSERT(pt_heap_region.GetEndAddress() != 0);

            InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
        }

        RegisterHostThread();
    }
@@ -86,6 +95,15 @@ struct KernelCore::Impl {
        }
    }

    void CloseCurrentProcess() {
        (*current_process).Finalize();
        // current_process->Close();
        // TODO: The current process should be destroyed based on accurate ref counting after
        // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
        (*current_process).Destroy();
        current_process = nullptr;
    }

    void Shutdown() {
        is_shutting_down.store(true, std::memory_order_relaxed);
        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@@ -99,10 +117,6 @@ struct KernelCore::Impl {
        next_user_process_id = KProcess::ProcessIDMin;
        next_thread_id = 1;

        for (auto& core : cores) {
            core = nullptr;
        }

        global_handle_table->Finalize();
        global_handle_table.reset();

@@ -152,15 +166,7 @@ struct KernelCore::Impl {
            }
        }

        // Shutdown all processes.
        if (current_process) {
            (*current_process).Finalize();
            // current_process->Close();
            // TODO: The current process should be destroyed based on accurate ref counting after
            // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
            (*current_process).Destroy();
            current_process = nullptr;
        }
        CloseCurrentProcess();

        // Track kernel objects that were not freed on shutdown
        {
@@ -257,6 +263,18 @@ struct KernelCore::Impl {
        system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
    }

    void InitializeResourceManagers(VAddr address, size_t size) {
        dynamic_page_manager = std::make_unique<KDynamicPageManager>();
        memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
        app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();

        dynamic_page_manager->Initialize(address, size);
        static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
        memory_block_heap->Initialize(dynamic_page_manager.get(),
                                      ApplicationMemoryBlockSlabHeapSize);
        app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
    }

    void InitializeShutdownThreads() {
        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
            shutdown_threads[core_id] = KThread::Create(system.Kernel());
@@ -344,11 +362,6 @@ struct KernelCore::Impl {
    static inline thread_local KThread* current_thread{nullptr};

    KThread* GetCurrentEmuThread() {
        // If we are shutting down the kernel, none of this is relevant anymore.
        if (IsShuttingDown()) {
            return {};
        }

        const auto thread_id = GetCurrentHostThreadID();
        if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
            return GetHostDummyThread();
@@ -724,7 +737,12 @@ struct KernelCore::Impl {
    }

    void ClearServiceThreads() {
        service_threads_manager.QueueWork([this]() { service_threads.clear(); });
        service_threads_manager.QueueWork([this] {
            service_threads.clear();
            default_service_thread.reset();
            service_thread_barrier.Sync();
        });
        service_thread_barrier.Sync();
    }

    std::mutex server_objects_lock;
@@ -770,6 +788,11 @@ struct KernelCore::Impl {
    // Kernel memory management
    std::unique_ptr<KMemoryManager> memory_manager;

    // Dynamic slab managers
    std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
    std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
    std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;

    // Shared memory for services
    Kernel::KSharedMemory* hid_shared_mem{};
    Kernel::KSharedMemory* font_shared_mem{};
@@ -784,6 +807,7 @@ struct KernelCore::Impl {
    std::unordered_set<std::shared_ptr<ServiceThread>> service_threads;
    std::weak_ptr<ServiceThread> default_service_thread;
    Common::ThreadWorker service_threads_manager;
    Common::Barrier service_thread_barrier;

    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads;
    std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -853,6 +877,10 @@ const KProcess* KernelCore::CurrentProcess() const {
    return impl->current_process;
}

void KernelCore::CloseCurrentProcess() {
    impl->CloseCurrentProcess();
}

const std::vector<KProcess*>& KernelCore::GetProcessList() const {
    return impl->process_list;
}
@@ -1041,6 +1069,14 @@ const KMemoryManager& KernelCore::MemoryManager() const {
    return *impl->memory_manager;
}

KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
    return *impl->app_memory_block_manager;
}

const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
    return *impl->app_memory_block_manager;
}

Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
    return *impl->hid_shared_mem;
}
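For context on ClearServiceThreads above: the two-participant barrier makes the call synchronous, so the caller only returns once the worker has actually dropped the service threads. The same handshake with std::barrier (my sketch; yuzu uses its own Common::Barrier):

    #include <barrier>
    #include <thread>

    // Two-party rendezvous, mirroring service_thread_barrier{2} above: the
    // caller hands cleanup to a worker and blocks until the worker reaches the
    // same sync point, guaranteeing the cleanup ran before the caller returns.
    std::barrier sync_point{2};

    void Worker() {
        // ... perform cleanup on the worker thread ...
        sync_point.arrive_and_wait();
    }

    int main() {
        std::thread worker{Worker};
        sync_point.arrive_and_wait(); // returns only after Worker's cleanup
        worker.join();
    }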
@@ -37,6 +37,7 @@ class KClientSession;
class KEvent;
class KHandleTable;
class KLinkedListNode;
class KMemoryBlockSlabManager;
class KMemoryLayout;
class KMemoryManager;
class KPageBuffer;
@@ -46,6 +47,7 @@ class KResourceLimit;
class KScheduler;
class KServerSession;
class KSession;
class KSessionRequest;
class KSharedMemory;
class KSharedMemoryInfo;
class KThread;
@@ -130,6 +132,9 @@ public:
    /// Retrieves a const pointer to the current process.
    const KProcess* CurrentProcess() const;

    /// Closes the current process.
    void CloseCurrentProcess();

    /// Retrieves the list of processes.
    const std::vector<KProcess*>& GetProcessList() const;

@@ -238,6 +243,12 @@ public:
    /// Gets the virtual memory manager for the kernel.
    const KMemoryManager& MemoryManager() const;

    /// Gets the application memory block manager for the kernel.
    KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();

    /// Gets the application memory block manager for the kernel.
    const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;

    /// Gets the shared memory object for HID services.
    Kernel::KSharedMemory& GetHidSharedMem();

@@ -350,6 +361,8 @@ public:
            return slab_heap_container->page_buffer;
        } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
            return slab_heap_container->thread_local_page;
        } else if constexpr (std::is_same_v<T, KSessionRequest>) {
            return slab_heap_container->session_request;
        }
    }

@@ -412,6 +425,7 @@ private:
        KSlabHeap<KCodeMemory> code_memory;
        KSlabHeap<KPageBuffer> page_buffer;
        KSlabHeap<KThreadLocalPage> thread_local_page;
        KSlabHeap<KSessionRequest> session_request;
    };

    std::unique_ptr<SlabHeapContainer> slab_heap_container;

@@ -24,7 +24,7 @@ public:
    }

    static Derived* Allocate(KernelCore& kernel) {
        return kernel.SlabHeap<Derived>().Allocate();
        return kernel.SlabHeap<Derived>().Allocate(kernel);
    }

    static void Free(KernelCore& kernel, Derived* obj) {
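A note on the SlabHeap<T>() accessor above: the if constexpr chain is a compile-time dispatch that collapses each instantiation to a single member access. A minimal standalone version of the idiom (hypothetical types):

    #include <type_traits>

    struct KThreadLike {};
    struct KSessionRequestLike {};

    struct SlabHeaps {
        int thread_heap;          // stand-ins for the KSlabHeap<T> members
        int session_request_heap;
    };

    // Each instantiation compiles down to one member access; types outside
    // the list fail at compile time instead of at runtime.
    template <typename T>
    int& SelectHeap(SlabHeaps& heaps) {
        if constexpr (std::is_same_v<T, KThreadLike>) {
            return heaps.thread_heap;
        } else {
            static_assert(std::is_same_v<T, KSessionRequestLike>,
                          "no slab heap for this type");
            return heaps.session_request_heap;
        }
    }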
@@ -751,8 +751,8 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
    }

    system.GetReporter().SaveSvcBreakReport(
        static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger, info1,
        info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
        static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger.As<bool>(),
        info1, info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);

    if (!break_reason.signal_debugger) {
        LOG_CRITICAL(
@@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
        return ResultSuccess;

    case GetInfoType::UserExceptionContextAddr:
        *result = process->GetTLSRegionAddress();
        *result = process->GetProcessLocalRegionAddress();
        return ResultSuccess;

    case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
    auto* current_process = system.Kernel().CurrentProcess();

    LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
    ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
    ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
               "Process has already exited");

    system.Exit();
@@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
        return ResultInvalidEnumValue;
    }

    *out = static_cast<u64>(process->GetStatus());
    *out = static_cast<u64>(process->GetState());
    return ResultSuccess;
}

@@ -14,8 +14,11 @@ namespace Kernel::Svc {

using namespace Common::Literals;

constexpr s32 ArgumentHandleCountMax = 0x40;
constexpr u32 HandleWaitMask{1u << 30};
constexpr inline s32 ArgumentHandleCountMax = 0x40;

constexpr inline u32 HandleWaitMask = 1u << 30;

constexpr inline s64 WaitInfinite = -1;

constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
@@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
constexpr inline s32 LowestThreadPriority = 63;
constexpr inline s32 HighestThreadPriority = 0;

constexpr inline s32 SystemThreadPriorityHighest = 16;

enum class ProcessState : u32 {
    Created = 0,
    CreatedAttached = 1,
    Running = 2,
    Crashed = 3,
    RunningAttached = 4,
    Terminating = 5,
    Terminated = 6,
    DebugBreak = 7,
};

constexpr inline size_t ThreadLocalRegionSize = 0x200;

} // namespace Kernel::Svc

@@ -135,6 +135,14 @@ union Result {
    [[nodiscard]] constexpr bool IsFailure() const {
        return !IsSuccess();
    }

    [[nodiscard]] constexpr u32 GetInnerValue() const {
        return static_cast<u32>(module.Value()) | (description << module.bits);
    }

    [[nodiscard]] constexpr bool Includes(Result result) const {
        return GetInnerValue() == result.GetInnerValue();
    }
};
static_assert(std::is_trivial_v<Result>);

@@ -462,9 +470,6 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
#define R_UNLESS(expr, res)                                                                        \
    {                                                                                              \
        if (!(expr)) {                                                                             \
            if (res.IsError()) {                                                                   \
                LOG_ERROR(Kernel, "Failed with result: {}", res.raw);                              \
            }                                                                                      \
            R_THROW(res);                                                                          \
        }                                                                                          \
    }
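For orientation (my reading; the real definitions live in core/hle/result.h and are more involved): the R_* macros used throughout the KThread hunks follow this general shape:

    // Hypothetical, simplified shapes only. R_SUCCEED/R_THROW/R_RETURN end the
    // function; R_TRY propagates failures and falls through on success.
    #define R_THROW(res)   return (res)
    #define R_RETURN(expr) return (expr)
    #define R_SUCCEED()    R_RETURN(ResultSuccess)
    #define R_TRY(expr)                                                            \
        do {                                                                       \
            if (const auto _r = (expr); _r.IsError()) {                            \
                R_THROW(_r);                                                       \
            }                                                                      \
        } while (0)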
@@ -164,7 +164,7 @@ protected:
        u32_le size;
        u32_le library_version;
        u32_le theme_color;
        u8 play_startup_sound;
        bool play_startup_sound;
        u64_le system_tick;
    };
    static_assert(sizeof(CommonArguments) == 0x20, "CommonArguments has incorrect size.");

@@ -45,9 +45,25 @@ AudCtl::AudCtl(Core::System& system_) : ServiceFramework{system_, "audctl"} {
        {32, nullptr, "GetActiveOutputTarget"},
        {33, nullptr, "GetTargetDeviceInfo"},
        {34, nullptr, "AcquireTargetNotification"},
        {35, nullptr, "SetHearingProtectionSafeguardTimerRemainingTimeForDebug"},
        {36, nullptr, "GetHearingProtectionSafeguardTimerRemainingTimeForDebug"},
        {37, nullptr, "SetHearingProtectionSafeguardEnabled"},
        {38, nullptr, "IsHearingProtectionSafeguardEnabled"},
        {39, nullptr, "IsHearingProtectionSafeguardMonitoringOutputForDebug"},
        {40, nullptr, "GetSystemInformationForDebug"},
        {41, nullptr, "SetVolumeButtonLongPressTime"},
        {42, nullptr, "SetNativeVolumeForDebug"},
        {10000, nullptr, "NotifyAudioOutputTargetForPlayReport"},
        {10001, nullptr, "NotifyAudioOutputChannelCountForPlayReport"},
        {10002, nullptr, "NotifyUnsupportedUsbOutputDeviceAttachedForPlayReport"},
        {10100, nullptr, "GetAudioVolumeDataForPlayReport"},
        {10101, nullptr, "BindAudioVolumeUpdateEventForPlayReport"},
        {10102, nullptr, "BindAudioOutputTargetUpdateEventForPlayReport"},
        {10103, nullptr, "GetAudioOutputTargetForPlayReport"},
        {10104, nullptr, "GetAudioOutputChannelCountForPlayReport"},
        {10105, nullptr, "BindAudioOutputChannelCountUpdateEventForPlayReport"},
        {10106, nullptr, "GetDefaultAudioOutputTargetForPlayReport"},
        {50000, nullptr, "SetAnalogInputBoostGainForPrototyping"},
    };
    // clang-format on
@@ -52,6 +52,8 @@ public:
            {9, &IAudioRenderer::GetRenderingTimeLimit, "GetRenderingTimeLimit"},
            {10, &IAudioRenderer::RequestUpdate, "RequestUpdateAuto"},
            {11, nullptr, "ExecuteAudioRendererRendering"},
            {12, &IAudioRenderer::SetVoiceDropParameter, "SetVoiceDropParameter"},
            {13, &IAudioRenderer::GetVoiceDropParameter, "GetVoiceDropParameter"},
        };
        // clang-format on
        RegisterHandlers(functions);
@@ -205,6 +207,30 @@ private:
        LOG_DEBUG(Service_Audio, "called");
    }

    void SetVoiceDropParameter(Kernel::HLERequestContext& ctx) {
        LOG_DEBUG(Service_Audio, "called");

        IPC::RequestParser rp{ctx};
        auto voice_drop_param{rp.Pop<f32>()};

        auto& system_ = impl->GetSystem();
        system_.SetVoiceDropParameter(voice_drop_param);

        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(ResultSuccess);
    }

    void GetVoiceDropParameter(Kernel::HLERequestContext& ctx) {
        LOG_DEBUG(Service_Audio, "called");

        auto& system_ = impl->GetSystem();
        auto voice_drop_param{system_.GetVoiceDropParameter()};

        IPC::ResponseBuilder rb{ctx, 3};
        rb.Push(ResultSuccess);
        rb.Push(voice_drop_param);
    }

    KernelHelpers::ServiceContext service_context;
    Kernel::KEvent* rendered_event;
    Manager& manager;
@@ -745,8 +745,9 @@ void Controller_NPad::SetSupportedNpadIdTypes(u8* data, std::size_t length) {
}

void Controller_NPad::GetSupportedNpadIdTypes(u32* data, std::size_t max_length) {
    ASSERT(max_length < supported_npad_id_types.size());
    std::memcpy(data, supported_npad_id_types.data(), supported_npad_id_types.size());
    const auto copy_amount = supported_npad_id_types.size() * sizeof(u32);
    ASSERT(max_length <= copy_amount);
    std::memcpy(data, supported_npad_id_types.data(), copy_amount);
}

std::size_t Controller_NPad::GetSupportedNpadIdTypesSize() const {
@@ -867,7 +868,7 @@ bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id,
        return false;
    }

    if (!controller.device->IsVibrationEnabled()) {
    if (!controller.device->IsVibrationEnabled(device_index)) {
        if (controller.vibration[device_index].latest_vibration_value.low_amplitude != 0.0f ||
            controller.vibration[device_index].latest_vibration_value.high_amplitude != 0.0f) {
            // Send an empty vibration to stop any vibrations.
@@ -1000,7 +1001,7 @@ void Controller_NPad::InitializeVibrationDeviceAtIndex(Core::HID::NpadIdType npa
    }

    controller.vibration[device_index].device_mounted =
        controller.device->TestVibration(device_index);
        controller.device->IsVibrationEnabled(device_index);
}

void Controller_NPad::SetPermitVibrationSession(bool permit_vibration_session) {
@@ -1501,25 +1502,25 @@ bool Controller_NPad::IsControllerSupported(Core::HID::NpadStyleIndex controller
    Core::HID::NpadStyleTag style = GetSupportedStyleSet();
    switch (controller) {
    case Core::HID::NpadStyleIndex::ProController:
        return style.fullkey;
        return style.fullkey.As<bool>();
    case Core::HID::NpadStyleIndex::JoyconDual:
        return style.joycon_dual;
        return style.joycon_dual.As<bool>();
    case Core::HID::NpadStyleIndex::JoyconLeft:
        return style.joycon_left;
        return style.joycon_left.As<bool>();
    case Core::HID::NpadStyleIndex::JoyconRight:
        return style.joycon_right;
        return style.joycon_right.As<bool>();
    case Core::HID::NpadStyleIndex::GameCube:
        return style.gamecube;
        return style.gamecube.As<bool>();
    case Core::HID::NpadStyleIndex::Pokeball:
        return style.palma;
        return style.palma.As<bool>();
    case Core::HID::NpadStyleIndex::NES:
        return style.lark;
        return style.lark.As<bool>();
    case Core::HID::NpadStyleIndex::SNES:
        return style.lucia;
        return style.lucia.As<bool>();
    case Core::HID::NpadStyleIndex::N64:
        return style.lagoon;
        return style.lagoon.As<bool>();
    case Core::HID::NpadStyleIndex::SegaGenesis:
        return style.lager;
        return style.lager.As<bool>();
    default:
        return false;
    }
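Context for the .As<bool>() changes above: the style members are one-bit fields inside a Common::BitField-style wrapper, and As<T>() makes the narrowing explicit rather than relying on an implicit conversion at each return. A toy one-bit field showing the idea (my sketch, not yuzu's actual BitField):

    #include <cstdint>

    // Minimal stand-in for a one-bit field inside a style-set bitmask; only
    // meant to illustrate why an explicit As<bool>() reads better than an
    // implicit conversion at a return statement.
    struct OneBitField {
        std::uint32_t raw;
        std::uint32_t position;

        std::uint32_t Value() const {
            return (raw >> position) & 1u;
        }
        template <typename T>
        T As() const {
            return static_cast<T>(Value() != 0);
        }
    };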
@@ -2118,7 +2118,7 @@ void Hid::WritePalmaWaveEntry(Kernel::HLERequestContext& ctx) {
    ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size");

    LOG_WARNING(Service_HID,
                "(STUBBED) called, connection_handle={}, wave_set={}, unkown={}, "
                "(STUBBED) called, connection_handle={}, wave_set={}, unknown={}, "
                "t_mem_handle=0x{:08X}, t_mem_size={}, size={}",
                connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size);

@@ -37,10 +37,10 @@ private:
    u8 pointing_status;
    INSERT_PADDING_BYTES(3);
    u32 unknown;
    float unkown_float1;
    float unknown_float1;
    float position_x;
    float position_y;
    float unkown_float2;
    float unknown_float2;
    Core::IrSensor::IrsRect window_of_interest;
};
static_assert(sizeof(PointingProcessorMarkerData) == 0x20,
@@ -290,7 +290,7 @@ public:
        const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
        const auto start_info{page_table.QueryInfo(start - 1)};

        if (start_info.state != Kernel::KMemoryState::Free) {
        if (start_info.GetState() != Kernel::KMemoryState::Free) {
            return {};
        }

@@ -300,7 +300,7 @@ public:

        const auto end_info{page_table.QueryInfo(start + size)};

        if (end_info.state != Kernel::KMemoryState::Free) {
        if (end_info.GetState() != Kernel::KMemoryState::Free) {
            return {};
        }
@@ -9,6 +9,7 @@
#include <mbedtls/hmac_drbg.h>

#include "common/fs/file.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "core/hle/service/mii/mii_manager.h"
@@ -279,7 +280,7 @@ bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info) {
                              Common::FS::FileType::BinaryFile};

    if (!keys_file.IsOpen()) {
        LOG_ERROR(Service_NFP, "No keys detected");
        LOG_ERROR(Service_NFP, "Failed to open key file");
        return false;
    }

@@ -295,6 +296,11 @@ bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info) {
    return true;
}

bool IsKeyAvailable() {
    const auto yuzu_keys_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::KeysDir);
    return Common::FS::Exists(yuzu_keys_dir / "key_retail.bin");
}

bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data) {
    InternalKey locked_secret{};
    InternalKey unfixed_info{};

@@ -91,6 +91,9 @@ void Cipher(const DerivedKeys& keys, const NTAG215File& in_data, NTAG215File& ou
/// Loads both amiibo keys from key_retail.bin
bool LoadKeys(InternalKey& locked_secret, InternalKey& unfixed_info);

/// Returns true if key_retail.bin exist
bool IsKeyAvailable();

/// Decodes encripted amiibo data returns true if output is valid
bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& tag_data);
@@ -17,6 +17,7 @@
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/service/mii/mii_manager.h"
#include "core/hle/service/mii/types.h"
#include "core/hle/service/nfp/amiibo_crypto.h"
#include "core/hle/service/nfp/nfp.h"
#include "core/hle/service/nfp/nfp_device.h"
@@ -233,6 +234,14 @@ Result NfpDevice::Mount(MountTarget mount_target_) {
        return NotAnAmiibo;
    }

    // Mark amiibos as read only when keys are missing
    if (!AmiiboCrypto::IsKeyAvailable()) {
        LOG_ERROR(Service_NFP, "No keys detected");
        device_state = DeviceState::TagMounted;
        mount_target = MountTarget::Rom;
        return ResultSuccess;
    }

    if (!AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data)) {
        LOG_ERROR(Service_NFP, "Can't decode amiibo {}", device_state);
        return CorruptedData;
@@ -8,7 +8,6 @@

#include "common/common_funcs.h"
#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/mii/types.h"
#include "core/hle/service/nfp/nfp_types.h"
#include "core/hle/service/service.h"

@@ -17,11 +17,6 @@ enum class ServiceType : u32 {
    System,
};

enum class State : u32 {
    NonInitialized,
    Initialized,
};

enum class DeviceState : u32 {
    Initialized,
    SearchingForTag,

@@ -6,12 +6,9 @@

#include "common/logging/log.h"
#include "core/core.h"
#include "core/hid/emulated_controller.h"
#include "core/hid/hid_core.h"
#include "core/hid/hid_types.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/service/mii/mii_manager.h"
#include "core/hle/service/nfp/nfp_device.h"
#include "core/hle/service/nfp/nfp_result.h"
#include "core/hle/service/nfp/nfp_user.h"

@@ -4,8 +4,7 @@
#pragma once

#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/nfp/nfp.h"
#include "core/hle/service/nfp/nfp_types.h"
#include "core/hle/service/service.h"

namespace Service::NFP {
class NfpDevice;
@@ -15,6 +14,11 @@ public:
    explicit IUser(Core::System& system_);

private:
    enum class State : u32 {
        NonInitialized,
        Initialized,
    };

    void Initialize(Kernel::HLERequestContext& ctx);
    void Finalize(Kernel::HLERequestContext& ctx);
    void ListDevices(Kernel::HLERequestContext& ctx);
@@ -255,15 +255,16 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
            .address = handle_description->address,
            .size = handle_description->size,
            .was_uncached = handle_description->flags.map_uncached.Value() != 0,
            .can_unlock = true,
        };
    } else {
        return std::nullopt;
    }

    // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed
    // If the handle hasn't been freed from memory, mark that
    if (!hWeak.expired()) {
        LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
        freeInfo.address = 0;
        freeInfo.can_unlock = false;
    }

    return freeInfo;

@@ -105,6 +105,7 @@ public:
        u64 address;       //!< Address the handle referred to before deletion
        u64 size;          //!< Page-aligned handle size
        bool was_uncached; //!< If the handle was allocated as uncached
        bool can_unlock;   //!< If the address region is ready to be unlocked
    };

    explicit NvMap(Tegra::Host1x::Host1x& host1x);
@@ -311,7 +311,8 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
                handle->address +
                (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};

            gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
            gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
                      use_big_pages);
        }
    }

@@ -350,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
            u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
            VAddr cpu_address{mapping->ptr + params.buffer_offset};

            gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
            gmmu->Map(gpu_address, cpu_address, params.mapping_size,
                      static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);

            return NvResult::Success;
        } catch (const std::out_of_range&) {
@@ -389,7 +391,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
        }

        const bool use_big_pages = alloc->second.big_pages && big_page;
        gmmu->Map(params.offset, cpu_address, size, use_big_pages);
        gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
                  use_big_pages);

        auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
                                               use_big_pages, alloc->second.sparse)};
@@ -409,7 +412,8 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
            return NvResult::InsufficientMemory;
        }

        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);
        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
                  static_cast<Tegra::PTEKind>(params.kind), big_page);

        auto mapping{
            std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
@@ -128,7 +128,8 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
    }
    ASSERT(system.CurrentProcess()
               ->PageTable()
               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
                                             Kernel::KMemoryPermission::None, true)
               .IsSuccess());
    std::memcpy(output.data(), &params, sizeof(params));
    return result;
@@ -250,10 +251,12 @@ NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
    }

    if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
        ASSERT(system.CurrentProcess()
                   ->PageTable()
                   .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
                   .IsSuccess());
        if (freeInfo->can_unlock) {
            ASSERT(system.CurrentProcess()
                       ->PageTable()
                       .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
                       .IsSuccess());
        }
        params.address = freeInfo->address;
        params.size = static_cast<u32>(freeInfo->size);
        params.flags.raw = 0;
@@ -53,7 +53,7 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
}

Module::Module(Core::System& system)
    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} {
    : container{system.Host1x()}, service_context{system, "nvdrv"}, events_interface{*this} {
    builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
        std::shared_ptr<Devices::nvdevice> device =
            std::make_shared<Devices::nvhost_as_gpu>(system, *this, container);

@@ -97,6 +97,9 @@ private:
    friend class EventInterface;
    friend class Service::NVFlinger::NVFlinger;

    /// Manages syncpoints on the host
    NvCore::Container container;

    /// Id to use for the next open file descriptor.
    DeviceFD next_fd = 1;

@@ -108,9 +111,6 @@ private:

    EventInterface events_interface;

    /// Manages syncpoints on the host
    NvCore::Container container;

    std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders;
};
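Why the Module constructor reordering above matters: C++ initializes non-static data members in declaration order, not initializer-list order, so container has to be declared before the members whose construction may use it. A small demonstration:

    #include <iostream>

    // B is declared after A, so A is constructed first even though the
    // initializer list below names b first; depending on list order instead of
    // declaration order is exactly the bug class the reordering above avoids.
    struct A { A() { std::cout << "A\n"; } };
    struct B { B() { std::cout << "B\n"; } };

    struct S {
        S() : b{}, a{} {} // still prints "A" then "B"
        A a;
        B b;
    };

    int main() {
        S s;
    }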
@@ -742,6 +742,13 @@ Status BufferQueueProducer::Disconnect(NativeWindowApi api) {
        return Status::NoError;
    }

    // HACK: We are not Android. Remove handle for items in queue, and clear queue.
    // Allows synchronous destruction of nvmap handles.
    for (auto& item : core->queue) {
        nvmap.FreeHandle(item.graphic_buffer->BufferId(), true);
    }
    core->queue.clear();

    switch (api) {
    case NativeWindowApi::Egl:
    case NativeWindowApi::Cpu:
@@ -102,15 +102,19 @@ NVFlinger::~NVFlinger() {
        system.CoreTiming().UnscheduleEvent(single_composition_event, {});
    }

    ShutdownLayers();

    if (nvdrv) {
        nvdrv->Close(disp_fd);
    }
}

void NVFlinger::ShutdownLayers() {
    for (auto& display : displays) {
        for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) {
            display.GetLayer(layer).Core().NotifyShutdown();
        }
    }

    if (nvdrv) {
        nvdrv->Close(disp_fd);
    }
}

void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
@@ -134,6 +138,19 @@ std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
    return itr->GetID();
}

bool NVFlinger::CloseDisplay(u64 display_id) {
    const auto lock_guard = Lock();
    auto* const display = FindDisplay(display_id);

    if (display == nullptr) {
        return false;
    }

    display->Reset();

    return true;
}

std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
    const auto lock_guard = Lock();
    auto* const display = FindDisplay(display_id);
Some files were not shown because too many files have changed in this diff.