Compare commits

235 Commits (abbreviated SHA1s):

0d6a8824d0, e4d55e4ee4, c4bc7ce7e2, 211da31b34, c973029374, 5eb30c7827, c9bb888adf, d05b183f21, 83eb9cf7da, 0e84fd95e2,
bb55d2e701, 0c176ce828, 26a1d4fc37, ea41c53ab1, 766941f1a3, d03afd6f4b, 4eece4d35d, 770f23db34, cbaf642ffe, d581a4a367,
3161b34ff6, 71c0e20f95, ad9e5bc5b7, a4472b5526, 6a0d8b2aa1, aa55c62159, dc520a487d, aa97f39ba8, df38c03a09, 4c198bbf06,
cf0f821565, 8baf036cdc, 7283010305, a5d8703235, 6f6be615f3, 862afa8514, e6fe40428c, 85527cc7c7, d7d7ae8219, 717c8ded82,
9fc1bcc7b2, 75596c07e0, ece22fcbc7, 38e4382f53, 37de88040c, 05ae0cab0e, 119315af08, 661fe06d9d, ba21ba0c5c, 32d7faafa8,
b7b47f3099, 6f941121e6, 6636b81573, 1f21fa866d, 84d130f143, d928ba8e40, 3aab7d4473, 6b6c02f541, 50bfacca88, 0cb9bc12fc,
6257461684, d353c45f7d, f76b4417e6, 0897f4f96c, 6d4f411c08, 37b17252d1, ddd3f48736, 46322be735, 3794851f7f, 74275d0968,
ca0d9ef4b8, 846b6fba82, 75ab52f05b, 83f649240e, f325fcb131, 5e4ab2d42c, de4afde065, 77b74f5d95, 633411c20f, 2228383322,
7aa91c8d9c, 7837185f0a, 983f2b7074, 7f0d0dd177, b42b894785, 4e9adae5da, f39d2cf78b, d8ff939edc, eec3184bb0, 67e0d38152,
808e22984f, e09756b2df, 8f00c59462, 1cdd2d5204, ccfdb7c1af, 6f0f7f1547, 316a2c1715, d867ae5ab6, c4d91488d9, 72bff8ba11,
84d15c7f47, dce242858a, 8ce6256722, 3e6840a74c, 25d53e66d1, 20118075c5, 2cdfbbc07d, cdb9fe978f, 86a3a0b1b4, f6e7cae62c,
2dd6a2352d, e0ec9ffc36, 041eb5bf57, 8b4d5aeb4f, d8e3380ea5, e59bd6c335, 77803d96be, fa9b7db76f, 0ec1801bc1, fa913a702f,
3c38bd7cf0, 165bce3c2d, 1a378a7769, cbb6c24215, 1689e0a71f, 2e782a154d, 0313ee7793, 05f2673648, 2d90a927c9, 2ccbf5abdd,
120cd450e5, f51c71e956, bb31b0f261, f86774c1ac, 42c4ef7373, c7e079a5d4, 6908ea2284, 347432524c, b02c3f2314, 3822e31323,
cae108404a, bad3025951, f3c40f4a20, e6ab1f673b, 93297d14d8, 91c410c918, 496695618a, 0860fffd78, 2f90694797, 3e0aaeba98,
82fdfb33ac, 1f54cd4ac7, efaedcab31, 49682a0481, aa9e9052a6, 93a7058d8e, f16db300c6, 969387a79a, 3968faec06, 7f66050f0c,
0b181eeef4, 6b71530fa8, a6628e8dba, 9e16837088, c0b1bdd237, d4c0b7b437, 7daf751b8d, 9524e28d20, fca195b4fb, 3efb8eb2dc,
5ffb8b8039, 925fb63478, 560bca57a2, 97879faea4, 470e89a8ed, 7bd3930939, b8a70c9999, 3cb4498142, a264b54022, 638fa6170a,
11f85ea713, 829e82e264, a4d11f4427, 1b787adbd0, abcc009dff, 79bcb38321, 8d4e026d05, ff26190d42, d00245d444, 1baedfa12c,
ed591934fb, 58eb6953d1, 2bb41cffca, 57a77e9ff4, d02ccfb15d, 9ec5f75f43, 345b9e6a08, 25dcaf1eca, 113a5ed68f, 47b8160666,
cb073f95dc, e63a5459e3, 6e1c6297a3, b6119a55f9, 0cfd90004b, 2cc9d94060, 0101ef9fb1, 9393f90ccf, 5000d814af, 8649c46c74,
1deb997eba, 282cd3e5fe, 23b6569fc2, 99507d0188, 88ccdaf10a, bffbaddb79, c75a4bdeaa, 2f37c7948f, f107e58fde, c70e1d0247,
ae453ab6a8, 20139f8a55, 4b773b15a6, 9fe077635e, 5c7eef3756, ddf5577799, f706b3bd24, d574bb4610, b0ba1a0b65, 0ba03d1b3a,
fcebd36cde, 40af1111c2, d4cb0eac87, c864cb5772, 9a95c7fa14
@@ -3,15 +3,6 @@
 # SPDX-FileCopyrightText: 2021 yuzu Emulator Project
 # SPDX-License-Identifier: GPL-2.0-or-later
 
-# Setup RC file for tx
-cat << EOF > ~/.transifexrc
-[https://www.transifex.com]
-hostname = https://www.transifex.com
-username = api
-password = $TRANSIFEX_API_TOKEN
-EOF
-
-
 set -x
 
 echo -e "\e[1m\e[33mBuild tools information:\e[0m"
@@ -19,9 +10,6 @@ cmake --version
 gcc -v
 tx --version
 
-# vcpkg needs these: curl zip unzip tar, have tar
-apt-get install -y curl zip unzip
-
 mkdir build && cd build
 cmake .. -DENABLE_QT_TRANSLATION=ON -DGENERATE_QT_TRANSLATION=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_SDL2=OFF -DYUZU_TESTS=OFF -DYUZU_USE_BUNDLED_VCPKG=ON
 make translation
.github/workflows/ci.yml (4 changes, vendored)

@@ -19,11 +19,11 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
-          fetch-depth: 0
+          fetch-depth: 0
       - name: Update Translation
         run: ./.ci/scripts/transifex/docker.sh
         env:
-          TRANSIFEX_API_TOKEN: ${{ secrets.TRANSIFEX_API_TOKEN }}
+          TX_TOKEN: ${{ secrets.TRANSIFEX_API_TOKEN }}
 
   reuse:
     runs-on: ubuntu-latest
@@ -133,13 +133,13 @@ if (NOT ENABLE_GENERIC)
     if (MSVC)
         detect_architecture("_M_AMD64" x86_64)
         detect_architecture("_M_IX86" x86)
-        detect_architecture("_M_ARM" ARM)
-        detect_architecture("_M_ARM64" ARM64)
+        detect_architecture("_M_ARM" arm)
+        detect_architecture("_M_ARM64" arm64)
     else()
         detect_architecture("__x86_64__" x86_64)
         detect_architecture("__i386__" x86)
-        detect_architecture("__arm__" ARM)
-        detect_architecture("__aarch64__" ARM64)
+        detect_architecture("__arm__" arm)
+        detect_architecture("__aarch64__" arm64)
     endif()
 endif()
 
@@ -218,11 +218,11 @@ if(ENABLE_QT)
     set(QT_VERSION 5.15)
 
     # Check for system Qt on Linux, fallback to bundled Qt
-    if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+    if (UNIX AND NOT APPLE)
         if (NOT YUZU_USE_BUNDLED_QT)
             find_package(Qt5 ${QT_VERSION} COMPONENTS Widgets DBus Multimedia)
         endif()
-        if (NOT Qt5_FOUND OR YUZU_USE_BUNDLED_QT)
+        if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND (NOT Qt5_FOUND OR YUZU_USE_BUNDLED_QT))
             # Check for dependencies, then enable bundled Qt download
 
             # Check that the system GLIBCXX version is compatible
@@ -323,7 +323,7 @@ if(ENABLE_QT)
 
         set(YUZU_QT_NO_CMAKE_SYSTEM_PATH "NO_CMAKE_SYSTEM_PATH")
     endif()
-    if ((${CMAKE_SYSTEM_NAME} STREQUAL "Linux") AND YUZU_USE_BUNDLED_QT)
+    if (UNIX AND NOT APPLE AND YUZU_USE_BUNDLED_QT)
         find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets Concurrent Multimedia DBus ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
     else()
         find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets Concurrent Multimedia ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
@@ -541,9 +541,9 @@ add_definitions(-DBOOST_ERROR_CODE_HEADER_ONLY
 # Adjustments for MSVC + Ninja
 if (MSVC AND CMAKE_GENERATOR STREQUAL "Ninja")
     add_compile_options(
-        /wd4711 # function 'function' selected for automatic inline expansion
-        /wd4820 # 'identifier1': '4' bytes padding added after data member 'identifier2'
+        /wd4464 # relative include path contains '..'
+        /wd4711 # function 'function' selected for automatic inline expansion
+        /wd4820 # 'bytes' bytes padding added after construct 'member_name'
     )
 endif()
dist/languages/.tx/config (2 changes, vendored)

@@ -1,7 +1,7 @@
 [main]
 host = https://www.transifex.com
 
-[yuzu.emulator]
+[o:yuzu-emulator:p:yuzu:r:emulator]
 file_filter = <lang>.ts
 source_file = en.ts
 source_lang = en
dist/languages/README.md (4 changes, vendored)

@@ -1 +1,3 @@
-This directory stores translation patches (TS files) for yuzu Qt frontend. This directory is linked with [yuzu project on transifex](https://www.transifex.com/yuzu-emulator/yuzu), so you can update the translation by executing `tx pull -a`. If you want to contribute to the translation, please go the transifex link and submit your translation there. This directory on the main repo will be synchronized with transifex periodically. Do not directly open PRs on github to modify the translation.
+This directory stores translation patches (TS files) for yuzu Qt frontend. This directory is linked with [yuzu project on transifex](https://www.transifex.com/yuzu-emulator/yuzu), so you can update the translation by executing `tx pull -t -a`. If you want to contribute to the translation, please go the transifex link and submit your translation there. This directory on the main repo will be synchronized with transifex periodically.
+
+Do not directly open PRs on github to modify the translation.
dist/languages/uk.ts (7321 lines, vendored, new file)

File diff suppressed because it is too large.
externals/CMakeLists.txt (11 changes, vendored)

@@ -7,15 +7,14 @@ include(DownloadExternals)
 
 # xbyak
 if (ARCHITECTURE_x86 OR ARCHITECTURE_x86_64)
-    add_library(xbyak INTERFACE)
-    file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
-    file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/xbyak/xbyak DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
-    target_include_directories(xbyak SYSTEM INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
-    target_compile_definitions(xbyak INTERFACE XBYAK_NO_OP_NAMES)
+    add_subdirectory(xbyak)
 endif()
 
 # Dynarmic
-if (ARCHITECTURE_x86_64)
+if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
+    if (ARCHITECTURE_arm64)
+        set(DYNARMIC_FRONTENDS "A32")
+    endif()
     set(DYNARMIC_NO_BUNDLED_FMT ON)
     set(DYNARMIC_IGNORE_ASSERTS ON CACHE BOOL "" FORCE)
     add_subdirectory(dynarmic)
externals/dynarmic (vendored): Submodule updated: 2d4602a651...424fdb5c50

externals/xbyak (vendored): Submodule updated: c306b8e578...348e3e548e
@@ -58,13 +58,11 @@ if (MSVC)
 
         # Warnings
-        /W3
-        /we4018 # 'expression': signed/unsigned mismatch
+        /WX
+
         /we4062 # Enumerator 'identifier' in a switch of enum 'enumeration' is not handled
         /we4101 # 'identifier': unreferenced local variable
         /we4189 # 'identifier': local variable is initialized but not referenced
         /we4265 # 'class': class has virtual functions, but destructor is not virtual
-        /we4267 # 'var': conversion from 'size_t' to 'type', possible loss of data
-        /we4305 # 'context': truncation from 'type1' to 'type2'
         /we4388 # 'expression': signed/unsigned mismatch
         /we4389 # 'operator': signed/unsigned mismatch
         /we4456 # Declaration of 'identifier' hides previous local declaration
@@ -75,10 +73,13 @@ if (MSVC)
         /we4547 # 'operator': operator before comma has no effect; expected operator with side-effect
         /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
         /we4555 # Expression has no effect; expected expression with side-effect
         /we4715 # 'function': not all control paths return a value
         /we4834 # Discarding return value of function with 'nodiscard' attribute
+        /we4826 # Conversion from 'type1' to 'type2' is sign-extended. This may cause unexpected runtime behavior.
         /we5038 # data member 'member1' will be initialized after data member 'member2'
+        /we5233 # explicit lambda capture 'identifier' is not used
+        /we5245 # 'function': unreferenced function with internal linkage has been removed
 
         /wd4100 # 'identifier': unreferenced formal parameter
         /wd4324 # 'struct_name': structure was padded due to __declspec(align())
     )
 
     if (USE_CCACHE)
@@ -99,24 +100,18 @@ if (MSVC)
     set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
 else()
     add_compile_options(
-        -Wall
-        -Werror=array-bounds
-        -Werror=implicit-fallthrough
-        -Werror=missing-field-initializers
-        -Werror=reorder
+        -Werror=all
+        -Werror=extra
+        -Werror=missing-declarations
         -Werror=shadow
-        -Werror=sign-compare
-        -Werror=switch
-        -Werror=uninitialized
-        -Werror=unused-function
-        -Werror=unused-result
-        -Werror=unused-variable
-        -Wextra
-        -Wmissing-declarations
+        -Werror=unused
 
         -Wno-attributes
         -Wno-invalid-offsetof
         -Wno-unused-parameter
+
+        $<$<CXX_COMPILER_ID:Clang>:-Wno-braced-scalar-init>
+        $<$<CXX_COMPILER_ID:Clang>:-Wno-unused-private-field>
     )
 
     if (ARCHITECTURE_x86_64)
@@ -206,27 +206,18 @@ if (MSVC)
         /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
         /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
         /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
-        /we4456 # Declaration of 'identifier' hides previous local declaration
-        /we4457 # Declaration of 'identifier' hides function parameter
-        /we4458 # Declaration of 'identifier' hides class member
-        /we4459 # Declaration of 'identifier' hides global declaration
         /we4800 # Implicit conversion from 'type' to bool. Possible information loss
     )
 else()
     target_compile_options(audio_core PRIVATE
         -Werror=conversion
-        -Werror=ignored-qualifiers
-        -Werror=shadow
-        -Werror=unused-variable
-
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
 
         -Wno-sign-conversion
     )
 endif()
 
 target_link_libraries(audio_core PUBLIC common core)
-if (ARCHITECTURE_x86_64)
+if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_link_libraries(audio_core PRIVATE dynarmic)
 endif()
@@ -56,7 +56,7 @@ Result System::IsConfigValid(const std::string_view device_name,
     return ResultSuccess;
 }
 
-Result System::Initialize(std::string& device_name, const AudioInParameter& in_params,
+Result System::Initialize(std::string device_name, const AudioInParameter& in_params,
                           const u32 handle_, const u64 applet_resource_user_id_) {
     auto result{IsConfigValid(device_name, in_params)};
     if (result.IsError()) {
@@ -97,7 +97,7 @@ public:
      * @param applet_resource_user_id - Unused.
      * @return Result code.
      */
-    Result Initialize(std::string& device_name, const AudioInParameter& in_params, u32 handle,
+    Result Initialize(std::string device_name, const AudioInParameter& in_params, u32 handle,
                       u64 applet_resource_user_id);
 
     /**
@@ -49,8 +49,8 @@ Result System::IsConfigValid(std::string_view device_name,
     return Service::Audio::ERR_INVALID_CHANNEL_COUNT;
 }
 
-Result System::Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle_,
-                          u64& applet_resource_user_id_) {
+Result System::Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle_,
+                          u64 applet_resource_user_id_) {
     auto result = IsConfigValid(device_name, in_params);
     if (result.IsError()) {
         return result;
@@ -88,8 +88,8 @@ public:
      * @param applet_resource_user_id - Unused.
      * @return Result code.
      */
-    Result Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle,
-                      u64& applet_resource_user_id);
+    Result Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle,
+                      u64 applet_resource_user_id);
 
     /**
     * Start this system.
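The audio_in/audio_out hunks above trade `std::string&` and `u64&` parameters for by-value ones. A minimal sketch (not yuzu code; the device name is hypothetical) of what that buys at call sites:

```cpp
#include <iostream>
#include <string>

// Rejects temporaries: callers need a named, mutable std::string.
void InitByRef(std::string& device_name) {
    if (device_name.empty()) {
        device_name = "default-device"; // mutates the caller's object
    }
}

// Accepts lvalues, temporaries, and literals alike; mutations stay local.
void InitByValue(std::string device_name) {
    if (device_name.empty()) {
        device_name = "default-device";
    }
    std::cout << "using device: " << device_name << '\n';
}

int main() {
    // InitByRef("");        // would not compile: cannot bind a temporary
    InitByValue("");         // fine: the fallback is applied locally
    InitByValue("AudioUsb"); // fine
}
```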
@@ -91,7 +91,7 @@ Result InfoUpdater::UpdateVoices(VoiceContext& voice_context,
         voice_info.Initialize();
 
         for (u32 channel = 0; channel < in_param.channel_count; channel++) {
-            std::memset(voice_states[channel], 0, sizeof(VoiceState));
+            *voice_states[channel] = {};
         }
     }
 
@@ -94,7 +94,7 @@ void BiquadFilterCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor
 void BiquadFilterCommand::Process(const ADSP::CommandListProcessor& processor) {
     auto state_{reinterpret_cast<VoiceState::BiquadFilterState*>(state)};
     if (needs_init) {
-        std::memset(state_, 0, sizeof(VoiceState::BiquadFilterState));
+        *state_ = {};
    }
 
     auto input_buffer{
@@ -30,7 +30,7 @@ void MultiTapBiquadFilterCommand::Process(const ADSP::CommandListProcessor& proc
     for (u32 i = 0; i < filter_tap_count; i++) {
         auto state{reinterpret_cast<VoiceState::BiquadFilterState*>(states[i])};
         if (needs_init[i]) {
-            std::memset(state, 0, sizeof(VoiceState::BiquadFilterState));
+            *state = {};
        }
 
         ApplyBiquadFilterFloat(output_buffer, input_buffer, biquads[i].b, biquads[i].a, *state,
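These three hunks replace `std::memset` with value-initialization. An illustrative sketch of the equivalence (and why the assignment form is preferable once a type stops being trivially copyable):

```cpp
#include <cstring>
#include <iostream>

struct BiquadState {
    float s0{}, s1{}, s2{}, s3{};
};

int main() {
    BiquadState a{1.0f, 2.0f, 3.0f, 4.0f};
    BiquadState b{1.0f, 2.0f, 3.0f, 4.0f};

    a = {};                       // value-initializes every member, type-safely
    std::memset(&b, 0, sizeof b); // byte-wise zero; only valid while trivially copyable

    std::cout << a.s0 << ' ' << b.s0 << '\n'; // 0 0
}
```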
@@ -98,9 +98,8 @@ System::System(Core::System& core_, Kernel::KEvent* adsp_rendered_event_)
     : core{core_}, adsp{core.AudioCore().GetADSP()}, adsp_rendered_event{adsp_rendered_event_} {}
 
 Result System::Initialize(const AudioRendererParameterInternal& params,
-                          Kernel::KTransferMemory* transfer_memory, const u64 transfer_memory_size,
-                          const u32 process_handle_, const u64 applet_resource_user_id_,
-                          const s32 session_id_) {
+                          Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
+                          u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) {
     if (!CheckValidRevision(params.revision)) {
         return Service::Audio::ERR_INVALID_REVISION;
     }
@@ -354,6 +353,8 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
 
     render_time_limit_percent = 100;
     drop_voice = params.voice_drop_enabled && params.execution_mode == ExecutionMode::Auto;
+    drop_voice_param = 1.0f;
+    num_voices_dropped = 0;
 
     allocator.Align(0x40);
     command_workbuffer_size = allocator.GetRemainingSize();
@@ -547,7 +548,7 @@ u32 System::GetRenderingTimeLimit() const {
     return render_time_limit_percent;
 }
 
-void System::SetRenderingTimeLimit(const u32 limit) {
+void System::SetRenderingTimeLimit(u32 limit) {
     render_time_limit_percent = limit;
 }
 
@@ -635,7 +636,7 @@ void System::SendCommandToDsp() {
 }
 
 u64 System::GenerateCommand(std::span<u8> in_command_buffer,
-                            [[maybe_unused]] const u64 command_buffer_size_) {
+                            [[maybe_unused]] u64 command_buffer_size_) {
     PoolMapper::ClearUseState(memory_pool_workbuffer, memory_pool_count);
     const auto start_time{core.CoreTiming().GetClockTicks()};
 
@@ -693,7 +694,8 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
 
     voice_context.SortInfo();
 
-    const auto start_estimated_time{command_buffer.estimated_process_time};
+    const auto start_estimated_time{drop_voice_param *
+                                    static_cast<f32>(command_buffer.estimated_process_time)};
 
     command_generator.GenerateVoiceCommands();
     command_generator.GenerateSubMixCommands();
@@ -712,11 +714,16 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
             render_context.behavior->IsAudioRendererProcessingTimeLimit70PercentSupported();
         time_limit_percent = 70.0f;
     }
+
+    const auto end_estimated_time{drop_voice_param *
+                                  static_cast<f32>(command_buffer.estimated_process_time)};
+    const auto estimated_time{start_estimated_time - end_estimated_time};
+
     const auto time_limit{static_cast<u32>(
-        static_cast<f32>(start_estimated_time - command_buffer.estimated_process_time) +
-        (((time_limit_percent / 100.0f) * 2'880'000.0) *
-         (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
-    num_voices_dropped = DropVoices(command_buffer, start_estimated_time, time_limit);
+        estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0) *
+                          (static_cast<f32>(render_time_limit_percent) / 100.0f)))};
+    num_voices_dropped =
+        DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit);
 }
 
 command_list_header->buffer_size = command_buffer.size;
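A worked example of the new limit formula with made-up inputs (the 2'880'000 tick constant comes from the diff; every other number below is hypothetical):

```cpp
#include <iostream>

int main() {
    const float drop_voice_param = 1.0f;
    const float start_estimated_time = drop_voice_param * 150'000.0f; // before generation
    const float end_estimated_time = drop_voice_param * 100'000.0f;   // after generation
    const float estimated_time = start_estimated_time - end_estimated_time; // 50'000

    const float time_limit_percent = 70.0f;         // the 70% cap when supported
    const float render_time_limit_percent = 100.0f; // user-configurable percentage

    const auto time_limit = static_cast<unsigned>(
        estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0f) *
                          (render_time_limit_percent / 100.0f)));

    std::cout << time_limit << '\n'; // 50'000 + 2'016'000 = 2'066'000 ticks
}
```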
@@ -737,24 +744,33 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
     return command_buffer.size;
 }
 
-u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_process_time,
-                       const u32 time_limit) {
+f32 System::GetVoiceDropParameter() const {
+    return drop_voice_param;
+}
+
+void System::SetVoiceDropParameter(f32 voice_drop_) {
+    drop_voice_param = voice_drop_;
+}
+
+u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit) {
     u32 i{0};
     auto command_list{command_buffer.command_list.data() + sizeof(CommandListHeader)};
-    ICommand* cmd{};
+    ICommand* cmd{nullptr};
 
-    for (; i < command_buffer.count; i++) {
+    // Find a first valid voice to drop
+    while (i < command_buffer.count) {
         cmd = reinterpret_cast<ICommand*>(command_list);
-        if (cmd->type != CommandId::Performance &&
-            cmd->type != CommandId::DataSourcePcmInt16Version1 &&
-            cmd->type != CommandId::DataSourcePcmInt16Version2 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion1 &&
-            cmd->type != CommandId::DataSourcePcmFloatVersion2 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion1 &&
-            cmd->type != CommandId::DataSourceAdpcmVersion2) {
+        if (cmd->type == CommandId::Performance ||
+            cmd->type == CommandId::DataSourcePcmInt16Version1 ||
+            cmd->type == CommandId::DataSourcePcmInt16Version2 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion1 ||
+            cmd->type == CommandId::DataSourcePcmFloatVersion2 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion1 ||
+            cmd->type == CommandId::DataSourceAdpcmVersion2) {
             break;
         }
         command_list += cmd->size;
+        i++;
     }
 
     if (cmd == nullptr || command_buffer.count == 0 || i >= command_buffer.count) {
@@ -767,6 +783,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
     const auto node_id_type{cmd->node_id >> 28};
     const auto node_id_base{cmd->node_id & 0xFFF};
 
+    // If the new estimated process time falls below the limit, we're done dropping.
     if (estimated_process_time <= time_limit) {
         break;
     }
@@ -775,6 +792,7 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
         break;
     }
 
+    // Don't drop voices marked with the highest priority.
     auto& voice_info{voice_context.GetInfo(node_id_base)};
     if (voice_info.priority == HighestVoicePriority) {
         break;
@@ -783,18 +801,23 @@ u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_proces
     voices_dropped++;
     voice_info.voice_dropped = true;
 
-    if (i < command_buffer.count) {
-        while (cmd->node_id == node_id) {
-            if (cmd->type == CommandId::DepopPrepare) {
-                cmd->enabled = true;
-            } else if (cmd->type == CommandId::Performance || !cmd->enabled) {
-                cmd->enabled = false;
-            }
-            i++;
-            command_list += cmd->size;
-            cmd = reinterpret_cast<ICommand*>(command_list);
-        }
-    }
+    // First iteration should drop the voice, and then iterate through all of the commands tied
+    // to the voice. We don't need reverb on a voice which we've just removed, for example.
+    // Depops can't be removed otherwise we'll introduce audio popping, and we don't
+    // remove perf commands. Lower the estimated time for each command dropped.
+    while (i < command_buffer.count && cmd->node_id == node_id) {
+        if (cmd->type == CommandId::DepopPrepare) {
+            cmd->enabled = true;
+        } else if (cmd->enabled && cmd->type != CommandId::Performance) {
+            cmd->enabled = false;
+            estimated_process_time -= static_cast<u32>(
+                drop_voice_param * static_cast<f32>(cmd->estimated_process_time));
+        }
+        command_list += cmd->size;
+        cmd = reinterpret_cast<ICommand*>(command_list);
+        i++;
+    }
+    i++;
     }
     return voices_dropped;
 }
@@ -196,6 +196,20 @@ public:
      */
     u32 DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit);
 
+    /**
+     * Get the current voice drop parameter.
+     *
+     * @return The current voice drop.
+     */
+    f32 GetVoiceDropParameter() const;
+
+    /**
+     * Set the voice drop parameter.
+     *
+     * @param The new voice drop.
+     */
+    void SetVoiceDropParameter(f32 voice_drop);
+
 private:
     /// Core system
     Core::System& core;
@@ -301,6 +315,8 @@ private:
     u32 num_voices_dropped{};
     /// Tick that rendering started
     u64 render_start_tick{};
+    /// Parameter to control the threshold for dropping voices if the audio graph gets too large
+    f32 drop_voice_param{1.0f};
 };
 
 } // namespace AudioRenderer
@@ -74,8 +74,8 @@ void VoiceContext::SortInfo() {
     }
 
     std::ranges::sort(sorted_voice_info, [](const VoiceInfo* a, const VoiceInfo* b) {
-        return a->priority != b->priority ? a->priority < b->priority
-                                          : a->sort_order < b->sort_order;
+        return a->priority != b->priority ? a->priority > b->priority
+                                          : a->sort_order > b->sort_order;
     });
 }
 
@@ -230,7 +230,9 @@ std::vector<std::string> ListSDLSinkDevices(bool capture) {
 
     const int device_count = SDL_GetNumAudioDevices(capture);
     for (int i = 0; i < device_count; ++i) {
-        device_list.emplace_back(SDL_GetAudioDeviceName(i, 0));
+        if (const char* name = SDL_GetAudioDeviceName(i, capture)) {
+            device_list.emplace_back(name);
+        }
     }
 
     return device_list;
@@ -156,12 +156,13 @@ if (MSVC)
     )
     target_compile_options(common PRIVATE
         /W4
         /WX
 
         /we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data
         /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
+        /we4800 # Implicit conversion from 'type' to bool. Possible information loss
     )
 else()
     target_compile_options(common PRIVATE
         -Werror
 
         $<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
     )
 endif()
@@ -169,7 +170,11 @@ endif()
 create_target_directory_groups(common)
 
 target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
-target_link_libraries(common PRIVATE lz4::lz4)
+if (TARGET lz4::lz4)
+    target_link_libraries(common PRIVATE lz4::lz4)
+else()
+    target_link_libraries(common PRIVATE LZ4::lz4_shared)
+endif()
 if (TARGET zstd::zstd)
     target_link_libraries(common PRIVATE zstd::zstd)
 else()
@@ -141,10 +141,6 @@ public:
     constexpr BitField(BitField&&) noexcept = default;
     constexpr BitField& operator=(BitField&&) noexcept = default;
 
-    [[nodiscard]] constexpr operator T() const {
-        return Value();
-    }
-
     constexpr void Assign(const T& value) {
 #ifdef _MSC_VER
         storage = static_cast<StorageType>((storage & ~mask) | FormatValue(value));
@@ -162,6 +158,17 @@ public:
         return ExtractValue(storage);
     }
 
+    template <typename ConvertedToType>
+    [[nodiscard]] constexpr ConvertedToType As() const {
+        static_assert(!std::is_same_v<T, ConvertedToType>,
+                      "Unnecessary cast. Use Value() instead.");
+        return static_cast<ConvertedToType>(Value());
+    }
+
+    [[nodiscard]] constexpr operator T() const {
+        return Value();
+    }
+
     [[nodiscard]] constexpr explicit operator bool() const {
         return Value() != 0;
     }
@@ -21,11 +21,6 @@ constexpr size_t hardware_interference_size = std::hardware_destructive_interfer
 constexpr size_t hardware_interference_size = 64;
 #endif
 
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4324)
-#endif
-
 template <typename T, size_t capacity = 0x400>
 class MPSCQueue {
 public:
@@ -160,8 +155,4 @@ private:
     static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
 };
 
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
-
 } // namespace Common
@@ -31,8 +31,10 @@
 
 #ifndef _MSC_VER
 
-#ifdef ARCHITECTURE_x86_64
+#if defined(ARCHITECTURE_x86_64)
 #define Crash() __asm__ __volatile__("int $3")
+#elif defined(ARCHITECTURE_arm64)
+#define Crash() __asm__ __volatile__("brk #0")
 #else
 #define Crash() exit(1)
 #endif
@@ -3,24 +3,14 @@
 
 #pragma once
 
+#include <iterator>
 #include <type_traits>
 
 namespace Common {
 
-// Check if type is like an STL container
+// Check if type satisfies the ContiguousContainer named requirement.
 template <typename T>
-concept IsSTLContainer = requires(T t) {
-    typename T::value_type;
-    typename T::iterator;
-    typename T::const_iterator;
-    // TODO(ogniK): Replace below is std::same_as<void> when MSVC supports it.
-    t.begin();
-    t.end();
-    t.cbegin();
-    t.cend();
-    t.data();
-    t.size();
-};
+concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;
 
 // TODO: Replace with std::derived_from when the <concepts> header
 // is available on all supported platforms.
@@ -34,4 +24,12 @@ concept DerivedFrom = requires {
 template <typename From, typename To>
 concept ConvertibleTo = std::is_convertible_v<From, To>;
 
+// No equivalents in the stdlib
+
+template <typename T>
+concept IsArithmetic = std::is_arithmetic_v<T>;
+
+template <typename T>
+concept IsIntegral = std::is_integral_v<T>;
+
 } // namespace Common
@@ -4,14 +4,7 @@
 // From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
 // See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math
 
-#ifndef FIXED_H_
-#define FIXED_H_
-
-#if __cplusplus >= 201402L
-#define CONSTEXPR14 constexpr
-#else
-#define CONSTEXPR14
-#endif
+#pragma once
 
 #include <cstddef> // for size_t
 #include <cstdint>
@@ -19,6 +12,8 @@
 #include <ostream>
 #include <type_traits>
 
+#include <common/concepts.h>
+
 namespace Common {
 
 template <size_t I, size_t F>
@@ -57,8 +52,8 @@ struct type_from_size<64> {
     static constexpr size_t size = 64;
 
     using value_type = int64_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<128>;
 };
 
@@ -68,8 +63,8 @@ struct type_from_size<32> {
     static constexpr size_t size = 32;
 
     using value_type = int32_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<64>;
 };
 
@@ -79,8 +74,8 @@ struct type_from_size<16> {
     static constexpr size_t size = 16;
 
     using value_type = int16_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<32>;
 };
 
@@ -90,8 +85,8 @@ struct type_from_size<8> {
     static constexpr size_t size = 8;
 
     using value_type = int8_t;
-    using unsigned_type = std::make_unsigned<value_type>::type;
-    using signed_type = std::make_signed<value_type>::type;
+    using unsigned_type = std::make_unsigned_t<value_type>;
+    using signed_type = std::make_signed_t<value_type>;
     using next_size = type_from_size<16>;
 };
 
@@ -106,9 +101,9 @@ constexpr B next_to_base(N rhs) {
 struct divide_by_zero : std::exception {};
 
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -126,9 +121,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 }
 
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> divide(
+constexpr FixedPoint<I, F> divide(
     FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
 
@@ -196,9 +191,9 @@ CONSTEXPR14 FixedPoint<I, F> divide(
 
 // this is the usual implementation of multiplication
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using next_type = typename FixedPoint<I, F>::next_type;
     using base_type = typename FixedPoint<I, F>::base_type;
@@ -215,9 +210,9 @@ CONSTEXPR14 FixedPoint<I, F> multiply(
 // it is slightly slower, but is more robust since it doesn't
 // require and upgraded type
 template <size_t I, size_t F>
-CONSTEXPR14 FixedPoint<I, F> multiply(
+constexpr FixedPoint<I, F> multiply(
     FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
-    typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+    std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
 
     using base_type = typename FixedPoint<I, F>::base_type;
 
@@ -272,19 +267,20 @@ public:
     static constexpr base_type one = base_type(1) << fractional_bits;
 
 public: // constructors
-    FixedPoint() = default;
-    FixedPoint(const FixedPoint&) = default;
-    FixedPoint(FixedPoint&&) = default;
-    FixedPoint& operator=(const FixedPoint&) = default;
+    constexpr FixedPoint() = default;
 
-    template <class Number>
-    constexpr FixedPoint(
-        Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
-        : data_(static_cast<base_type>(n * one)) {}
+    constexpr FixedPoint(const FixedPoint&) = default;
+    constexpr FixedPoint& operator=(const FixedPoint&) = default;
+
+    constexpr FixedPoint(FixedPoint&&) noexcept = default;
+    constexpr FixedPoint& operator=(FixedPoint&&) noexcept = default;
+
+    template <IsArithmetic Number>
+    constexpr FixedPoint(Number n) : data_(static_cast<base_type>(n * one)) {}
 
 public: // conversion
     template <size_t I2, size_t F2>
-    CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
+    constexpr explicit FixedPoint(FixedPoint<I2, F2> other) {
         static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
         using T = FixedPoint<I2, F2>;
 
@@ -308,36 +304,14 @@ public:
     }
 
 public: // comparison operators
-    constexpr bool operator==(FixedPoint rhs) const {
-        return data_ == rhs.data_;
-    }
-
-    constexpr bool operator!=(FixedPoint rhs) const {
-        return data_ != rhs.data_;
-    }
-
-    constexpr bool operator<(FixedPoint rhs) const {
-        return data_ < rhs.data_;
-    }
-
-    constexpr bool operator>(FixedPoint rhs) const {
-        return data_ > rhs.data_;
-    }
-
-    constexpr bool operator<=(FixedPoint rhs) const {
-        return data_ <= rhs.data_;
-    }
-
-    constexpr bool operator>=(FixedPoint rhs) const {
-        return data_ >= rhs.data_;
-    }
+    friend constexpr auto operator<=>(FixedPoint lhs, FixedPoint rhs) = default;
 
 public: // unary operators
-    constexpr bool operator!() const {
+    [[nodiscard]] constexpr bool operator!() const {
         return !data_;
     }
 
-    constexpr FixedPoint operator~() const {
+    [[nodiscard]] constexpr FixedPoint operator~() const {
         // NOTE(eteran): this will often appear to "just negate" the value
         // that is not an error, it is because -x == (~x+1)
         // and that "+1" is adding an infinitesimally small fraction to the
@@ -345,89 +319,87 @@ public: // unary operators
         return FixedPoint::from_base(~data_);
     }
 
-    constexpr FixedPoint operator-() const {
+    [[nodiscard]] constexpr FixedPoint operator-() const {
         return FixedPoint::from_base(-data_);
     }
 
-    constexpr FixedPoint operator+() const {
+    [[nodiscard]] constexpr FixedPoint operator+() const {
         return FixedPoint::from_base(+data_);
     }
 
-    CONSTEXPR14 FixedPoint& operator++() {
+    constexpr FixedPoint& operator++() {
         data_ += one;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator--() {
+    constexpr FixedPoint& operator--() {
         data_ -= one;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint operator++(int) {
+    constexpr FixedPoint operator++(int) {
        FixedPoint tmp(*this);
        data_ += one;
        return tmp;
     }
 
-    CONSTEXPR14 FixedPoint operator--(int) {
+    constexpr FixedPoint operator--(int) {
        FixedPoint tmp(*this);
        data_ -= one;
        return tmp;
     }
 
 public: // basic math operators
-    CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
+    constexpr FixedPoint& operator+=(FixedPoint n) {
         data_ += n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
+    constexpr FixedPoint& operator-=(FixedPoint n) {
         data_ -= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
+    constexpr FixedPoint& operator*=(FixedPoint n) {
         return assign(detail::multiply(*this, n));
     }
 
-    CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
+    constexpr FixedPoint& operator/=(FixedPoint n) {
         FixedPoint temp;
         return assign(detail::divide(*this, n, temp));
     }
 
 private:
-    CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
+    constexpr FixedPoint& assign(FixedPoint rhs) {
         data_ = rhs.data_;
         return *this;
     }
 
 public: // binary math operators, effects underlying bit pattern since these
         // don't really typically make sense for non-integer values
-    CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
+    constexpr FixedPoint& operator&=(FixedPoint n) {
         data_ &= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
+    constexpr FixedPoint& operator|=(FixedPoint n) {
         data_ |= n.data_;
         return *this;
     }
 
-    CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
+    constexpr FixedPoint& operator^=(FixedPoint n) {
         data_ ^= n.data_;
         return *this;
     }
 
-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator>>=(Integer n) {
         data_ >>= n;
         return *this;
     }
 
-    template <class Integer,
-              class = typename std::enable_if<std::is_integral<Integer>::value>::type>
-    CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
+    template <IsIntegral Integer>
+    constexpr FixedPoint& operator<<=(Integer n) {
         data_ <<= n;
         return *this;
     }
@@ -437,42 +409,42 @@ public: // conversion to basic types
         data_ += (data_ & fractional_mask) >> 1;
     }
 
-    constexpr int to_int() {
+    [[nodiscard]] constexpr int to_int() {
         round_up();
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr unsigned int to_uint() const {
+    [[nodiscard]] constexpr unsigned int to_uint() {
         round_up();
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int64_t to_long() {
+    [[nodiscard]] constexpr int64_t to_long() {
         round_up();
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int to_int_floor() const {
+    [[nodiscard]] constexpr int to_int_floor() const {
         return static_cast<int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr int64_t to_long_floor() {
+    [[nodiscard]] constexpr int64_t to_long_floor() const {
         return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr unsigned int to_uint_floor() const {
+    [[nodiscard]] constexpr unsigned int to_uint_floor() const {
         return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
     }
 
-    constexpr float to_float() const {
+    [[nodiscard]] constexpr float to_float() const {
         return static_cast<float>(data_) / FixedPoint::one;
     }
 
-    constexpr double to_double() const {
+    [[nodiscard]] constexpr double to_double() const {
         return static_cast<double>(data_) / FixedPoint::one;
     }
 
-    constexpr base_type to_raw() const {
+    [[nodiscard]] constexpr base_type to_raw() const {
         return data_;
     }
 
@@ -480,27 +452,27 @@ public: // conversion to basic types
         data_ &= fractional_mask;
     }
 
-    constexpr base_type get_frac() const {
+    [[nodiscard]] constexpr base_type get_frac() const {
         return data_ & fractional_mask;
     }
 
 public:
-    CONSTEXPR14 void swap(FixedPoint& rhs) {
+    constexpr void swap(FixedPoint& rhs) noexcept {
         using std::swap;
         swap(data_, rhs.data_);
     }
 
 public:
-    base_type data_;
+    base_type data_{};
 };
 
 // if we have the same fractional portion, but differing integer portions, we trivially upgrade the
 // smaller type
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator+(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -508,10 +480,10 @@ operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator-(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -519,10 +491,10 @@ operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator*(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -530,10 +502,10 @@ operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 }
 
 template <size_t I1, size_t I2, size_t F>
-CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
-operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator/(
+    FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
 
-    using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+    using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
 
     const T l = T::from_base(lhs.to_raw());
     const T r = T::from_base(rhs.to_raw());
@@ -548,159 +520,133 @@ std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {
|
||||
|
||||
// basic math operators
|
||||
template <size_t I, size_t F>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
|
||||
constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
|
||||
lhs += rhs;
|
||||
return lhs;
|
||||
}
|
||||
template <size_t I, size_t F>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
|
||||
constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
|
||||
lhs -= rhs;
|
||||
return lhs;
|
||||
}
|
||||
template <size_t I, size_t F>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
|
||||
constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
|
||||
lhs *= rhs;
|
||||
return lhs;
|
||||
}
|
||||
template <size_t I, size_t F>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
|
||||
constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
|
||||
lhs /= rhs;
|
||||
return lhs;
|
||||
}
|
||||
|
||||
template <size_t I, size_t F, class Number,
|
||||
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
|
||||
template <size_t I, size_t F, IsArithmetic Number>
|
||||
constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
|
||||
lhs += FixedPoint<I, F>(rhs);
|
||||
return lhs;
|
||||
}
|
||||
template <size_t I, size_t F, class Number,
|
||||
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
|
||||
template <size_t I, size_t F, IsArithmetic Number>
|
||||
constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
|
||||
lhs -= FixedPoint<I, F>(rhs);
|
||||
return lhs;
|
||||
}
|
||||
template <size_t I, size_t F, class Number,
|
||||
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
|
||||
template <size_t I, size_t F, IsArithmetic Number>
|
||||
constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
|
||||
lhs *= FixedPoint<I, F>(rhs);
|
||||
return lhs;
|
||||
}
|
||||
template <size_t I, size_t F, class Number,
|
||||
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
|
||||
template <size_t I, size_t F, IsArithmetic Number>
|
||||
constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
|
||||
lhs /= FixedPoint<I, F>(rhs);
|
||||
return lhs;
|
||||
}
|
||||
|
||||
template <size_t I, size_t F, class Number,
|
||||
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
|
||||
template <size_t I, size_t F, IsArithmetic Number>
|
||||
constexpr FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
|
||||
FixedPoint<I, F> tmp(lhs);
|
||||
tmp += rhs;
|
||||
return tmp;
|
||||
}
|
||||
template <size_t I, size_t F, class Number,
|
||||
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
|
||||
template <size_t I, size_t F, IsArithmetic Number>
|
||||
constexpr FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
|
||||
FixedPoint<I, F> tmp(lhs);
|
||||
tmp -= rhs;
|
||||
return tmp;
|
||||
}
|
||||
template <size_t I, size_t F, class Number,
|
||||
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
|
||||
template <size_t I, size_t F, IsArithmetic Number>
|
||||
constexpr FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
|
||||
FixedPoint<I, F> tmp(lhs);
|
||||
tmp *= rhs;
|
||||
return tmp;
|
||||
}
|
||||
template <size_t I, size_t F, class Number,
|
||||
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
|
||||
CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
|
||||
template <size_t I, size_t F, IsArithmetic Number>
|
||||
constexpr FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
|
||||
FixedPoint<I, F> tmp(lhs);
|
||||
tmp /= rhs;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
// shift operators
|
||||
template <size_t I, size_t F, class Integer,
class = typename std::enable_if<std::is_integral<Integer>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
template <size_t I, size_t F, IsIntegral Integer>
constexpr FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
lhs <<= rhs;
return lhs;
}
template <size_t I, size_t F, class Integer,
class = typename std::enable_if<std::is_integral<Integer>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
template <size_t I, size_t F, IsIntegral Integer>
constexpr FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
lhs >>= rhs;
return lhs;
}

// comparison operators
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
return lhs > FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
return lhs < FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
return lhs >= FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
return lhs <= FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
return lhs == FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
return lhs != FixedPoint<I, F>(rhs);
}

template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) > rhs;
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) < rhs;
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) >= rhs;
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) <= rhs;
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) == rhs;
}
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
template <size_t I, size_t F, IsArithmetic Number>
constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) != rhs;
}

} // namespace Common

#undef CONSTEXPR14

#endif
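The hunks above replace trailing `enable_if` SFINAE parameters with C++20 concept-constrained template headers (`IsIntegral`, `IsArithmetic`). A minimal sketch of how such constraints could be declared — the concept names match the diff, but these exact definitions are an assumption:

```cpp
#include <concepts>
#include <type_traits>

// Hypothetical definitions matching the constraint names used above.
template <typename T>
concept IsIntegral = std::is_integral_v<T>;

template <typename T>
concept IsArithmetic = std::is_arithmetic_v<T>;

// A constrained function reads as a single template header instead of a
// defaulted enable_if template parameter.
template <IsIntegral Integer>
constexpr int shift_left(int value, Integer amount) {
    return value << amount;
}

static_assert(shift_left(1, 4) == 16);
// shift_left(1, 2.0) would fail to compile: double does not satisfy IsIntegral.
```

A side benefit of the concept form is the error message: the compiler reports which constraint was not satisfied, rather than a substitution failure deep inside `enable_if`.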
@@ -209,8 +209,8 @@ public:

/**
* Helper function which deduces the value type of a contiguous STL container used in ReadSpan.
* If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls
* ReadObject and T must be a trivially copyable object.
* If T is not a contiguous container as defined by the concept IsContiguousContainer, this
* calls ReadObject and T must be a trivially copyable object.
*
* See ReadSpan for more details if T is a contiguous container.
* See ReadObject for more details if T is a trivially copyable object.
@@ -223,7 +223,7 @@ public:
*/
template <typename T>
[[nodiscard]] size_t Read(T& data) const {
if constexpr (IsSTLContainer<T>) {
if constexpr (IsContiguousContainer<T>) {
using ContiguousType = typename T::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
"Data type must be trivially copyable.");
@@ -235,8 +235,8 @@ public:

/**
* Helper function which deduces the value type of a contiguous STL container used in WriteSpan.
* If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls
* WriteObject and T must be a trivially copyable object.
* If T is not a contiguous STL container as defined by the concept IsContiguousContainer, this
* calls WriteObject and T must be a trivially copyable object.
*
* See WriteSpan for more details if T is a contiguous container.
* See WriteObject for more details if T is a trivially copyable object.
@@ -249,7 +249,7 @@ public:
*/
template <typename T>
[[nodiscard]] size_t Write(const T& data) const {
if constexpr (IsSTLContainer<T>) {
if constexpr (IsContiguousContainer<T>) {
using ContiguousType = typename T::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
"Data type must be trivially copyable.");
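The rename from `IsSTLContainer` to `IsContiguousContainer` narrows what the `Read`/`Write` helpers treat as a span of elements. One way such a concept could be written — a sketch, not the project's actual definition:

```cpp
#include <concepts>
#include <ranges>
#include <string>
#include <vector>

// Sketch: a contiguous container exposes value_type and models a contiguous
// range, so its elements form one block suitable for a raw byte copy.
template <typename T>
concept IsContiguousContainer =
    std::ranges::contiguous_range<T> && requires { typename T::value_type; };

static_assert(IsContiguousContainer<std::vector<int>>);
static_assert(IsContiguousContainer<std::string>);
static_assert(!IsContiguousContainer<int>);
```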
@@ -359,6 +359,12 @@ public:
}
});

long page_size = sysconf(_SC_PAGESIZE);
if (page_size != 0x1000) {
LOG_CRITICAL(HW_Memory, "page size {:#x} is incompatible with 4K paging", page_size);
throw std::bad_alloc{};
}

// Backing memory initialization
#if defined(__FreeBSD__) && __FreeBSD__ < 13
// XXX Drop after FreeBSD 12.* reaches EOL on 2024-06-30

@@ -100,7 +100,6 @@ enum class CameraError {
enum class VibrationAmplificationType {
Linear,
Exponential,
Test,
};

// Analog properties for calibration
@@ -325,6 +324,10 @@ public:
return VibrationError::NotSupported;
}

virtual bool IsVibrationEnabled() {
return false;
}

virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) {
return PollingError::NotSupported;
}

@@ -151,6 +151,7 @@ void UpdateRescalingInfo() {
ASSERT(false);
info.up_scale = 1;
info.down_shift = 0;
break;
}
info.up_factor = static_cast<f32>(info.up_scale) / (1U << info.down_shift);
info.down_factor = static_cast<f32>(1U << info.down_shift) / info.up_scale;
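In the rescaling hunk above, `up_factor` and `down_factor` are reciprocal ratios derived from an integer scale and a power-of-two shift. A small worked example of that arithmetic (the variable names mirror the diff; the values are illustrative):

```cpp
#include <cassert>

int main() {
    // Scale up by 3, shift down by 1 (divide by 2): an effective 1.5x factor,
    // exactly as the formulas in the hunk compute.
    const unsigned up_scale = 3;
    const unsigned down_shift = 1;
    const float up_factor = static_cast<float>(up_scale) / (1U << down_shift);  // 3 / 2 = 1.5f
    const float down_factor = static_cast<float>(1U << down_shift) / up_scale;  // 2 / 3 ≈ 0.667f
    assert(up_factor == 1.5f); // 1.5 is exactly representable, so this holds
    (void)down_factor;
    return 0;
}
```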
@@ -190,8 +190,13 @@ add_library(core STATIC
hle/kernel/k_code_memory.h
hle/kernel/k_condition_variable.cpp
hle/kernel/k_condition_variable.h
hle/kernel/k_debug.h
hle/kernel/k_dynamic_page_manager.h
hle/kernel/k_dynamic_resource_manager.h
hle/kernel/k_dynamic_slab_heap.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_event_info.h
hle/kernel/k_handle_table.cpp
hle/kernel/k_handle_table.h
hle/kernel/k_interrupt_manager.cpp
@@ -219,6 +224,8 @@ add_library(core STATIC
hle/kernel/k_page_group.h
hle/kernel/k_page_table.cpp
hle/kernel/k_page_table.h
hle/kernel/k_page_table_manager.h
hle/kernel/k_page_table_slab_heap.h
hle/kernel/k_port.cpp
hle/kernel/k_port.h
hle/kernel/k_priority_queue.h
@@ -240,6 +247,8 @@ add_library(core STATIC
hle/kernel/k_server_session.h
hle/kernel/k_session.cpp
hle/kernel/k_session.h
hle/kernel/k_session_request.cpp
hle/kernel/k_session_request.h
hle/kernel/k_shared_memory.cpp
hle/kernel/k_shared_memory.h
hle/kernel/k_shared_memory_info.h
@@ -249,6 +258,8 @@ add_library(core STATIC
hle/kernel/k_synchronization_object.cpp
hle/kernel/k_synchronization_object.h
hle/kernel/k_system_control.h
hle/kernel/k_system_resource.cpp
hle/kernel/k_system_resource.h
hle/kernel/k_thread.cpp
hle/kernel/k_thread.h
hle/kernel/k_thread_local_page.cpp
@@ -486,10 +497,6 @@ add_library(core STATIC
hle/service/hid/irsensor/processor_base.h
hle/service/hid/irsensor/tera_plugin_processor.cpp
hle/service/hid/irsensor/tera_plugin_processor.h
hle/service/jit/jit_context.cpp
hle/service/jit/jit_context.h
hle/service/jit/jit.cpp
hle/service/jit/jit.h
hle/service/lbl/lbl.cpp
hle/service/lbl/lbl.h
hle/service/ldn/lan_discovery.cpp
@@ -769,19 +776,15 @@ if (MSVC)
/we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
/we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4800 # Implicit conversion from 'type' to bool. Possible information loss
)
else()
target_compile_options(core PRIVATE
-Werror=conversion
-Werror=ignored-qualifiers

$<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>

$<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>

-Wno-sign-conversion

$<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
)
endif()

@@ -798,14 +801,18 @@ if (ENABLE_WEB_SERVICE)
target_link_libraries(core PRIVATE web_service)
endif()

if (ARCHITECTURE_x86_64)
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
target_sources(core PRIVATE
arm/dynarmic/arm_dynarmic_32.cpp
arm/dynarmic/arm_dynarmic_32.h
arm/dynarmic/arm_dynarmic_64.cpp
arm/dynarmic/arm_dynarmic_64.h
arm/dynarmic/arm_dynarmic_32.cpp
arm/dynarmic/arm_dynarmic_32.h
arm/dynarmic/arm_dynarmic_cp15.cpp
arm/dynarmic/arm_dynarmic_cp15.h
hle/service/jit/jit_context.cpp
hle/service/jit/jit_context.h
hle/service/jit/jit.cpp
hle/service/jit/jit.h
)
target_link_libraries(core PRIVATE dynarmic)
endif()

@@ -134,6 +134,14 @@ void ARM_Interface::Run() {
}
system.ExitDynarmicProfile();

// If the thread is scheduled for termination, exit the thread.
if (current_thread->HasDpc()) {
if (current_thread->IsTerminationRequested()) {
current_thread->Exit();
UNREACHABLE();
}
}

// Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason.
if (Has(hr, breakpoint) || Has(hr, no_execute)) {

@@ -301,6 +301,11 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
}
}

#ifdef ARCHITECTURE_arm64
// TODO: remove when fixed in dynarmic
config.optimizations &= ~Dynarmic::OptimizationFlag::BlockLinking;
#endif

return std::make_unique<Dynarmic::A32::Jit>(config);
}

@@ -450,7 +455,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktrace(Core::S
// Frame records are two words long:
// fp+0 : pointer to previous frame record
// fp+4 : value of lr for frame
while (true) {
for (size_t i = 0; i < 256; i++) {
out.push_back({"", 0, lr, 0, ""});
if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 8)) {
break;

@@ -517,7 +517,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktrace(Core::S
// Frame records are two words long:
// fp+0 : pointer to previous frame record
// fp+8 : value of lr for frame
while (true) {
for (size_t i = 0; i < 256; i++) {
out.push_back({"", 0, lr, 0, ""});
if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 16)) {
break;
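Both backtrace hunks above bound the frame-record walk, previously `while (true)`, to 256 iterations, which guards against cyclic or corrupted frame chains in guest memory. A simplified, self-contained sketch of the same AArch64-style walk; the `FakeMemory` accessor is a toy stand-in for the emulator's guest-memory interface:

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

// Toy stand-in for the emulator's guest-memory accessor (assumption).
struct FakeMemory {
    std::unordered_map<std::uint64_t, std::uint64_t> words;
    bool IsValidRange(std::uint64_t addr, std::uint64_t /*size*/) const {
        return words.count(addr) != 0;
    }
    std::uint64_t Read64(std::uint64_t addr) const {
        const auto it = words.find(addr);
        return it == words.end() ? 0 : it->second;
    }
};

// Walk AArch64 frame records: fp+0 -> previous fp, fp+8 -> saved lr.
// The fixed iteration cap bounds the walk even if the chain is cyclic.
std::vector<std::uint64_t> Backtrace(const FakeMemory& memory, std::uint64_t fp,
                                     std::uint64_t lr) {
    std::vector<std::uint64_t> out;
    for (std::size_t i = 0; i < 256; i++) {
        out.push_back(lr);
        if (!fp || (fp % 4 != 0) || !memory.IsValidRange(fp, 16)) {
            break;
        }
        lr = memory.Read64(fp + 8);
        fp = memory.Read64(fp);
    }
    return out;
}
```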
@@ -52,12 +52,16 @@ CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1
case 4:
// CP15_DATA_SYNC_BARRIER
return Callback{
[](Dynarmic::A32::Jit*, void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
#ifdef _MSC_VER
[](void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
#if defined(_MSC_VER) && defined(ARCHITECTURE_x86_64)
_mm_mfence();
_mm_lfence();
#else
#elif defined(ARCHITECTURE_x86_64)
asm volatile("mfence\n\tlfence\n\t" : : : "memory");
#elif defined(ARCHITECTURE_arm64)
asm volatile("dsb sy\n\t" : : : "memory");
#else
#error Unsupported architecture
#endif
return 0;
},
@@ -66,11 +70,15 @@ CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1
case 5:
// CP15_DATA_MEMORY_BARRIER
return Callback{
[](Dynarmic::A32::Jit*, void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
#ifdef _MSC_VER
[](void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
#if defined(_MSC_VER) && defined(ARCHITECTURE_x86_64)
_mm_mfence();
#else
#elif defined(ARCHITECTURE_x86_64)
asm volatile("mfence\n\t" : : : "memory");
#elif defined(ARCHITECTURE_arm64)
asm volatile("dmb sy\n\t" : : : "memory");
#else
#error Unsupported architecture
#endif
return 0;
},
@@ -115,7 +123,7 @@ CallbackOrAccessOneWord DynarmicCP15::CompileGetOneWord(bool two, unsigned opc1,
CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc, CoprocReg CRm) {
if (!two && opc == 0 && CRm == CoprocReg::C14) {
// CNTPCT
const auto callback = [](Dynarmic::A32::Jit*, void* arg, u32, u32) -> u64 {
const auto callback = [](void* arg, u32, u32) -> u64 {
const auto& parent_arg = *static_cast<ARM_Dynarmic_32*>(arg);
return parent_arg.system.CoreTiming().GetClockTicks();
};
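The CP15 hunks above select a host barrier per architecture: `mfence`/`lfence` on x86_64, `dsb sy`/`dmb sy` on arm64, with an `#error` for anything else. Where exact hardware barrier semantics are not required, a portable fallback could instead use the standard fence; a sketch only — the project deliberately uses the architecture-specific instructions above, whose guarantees are stronger than the C++ memory-model fence:

```cpp
#include <atomic>

// Portable full fence: typically lowers to mfence on x86_64 and to
// dmb ish on AArch64. Weaker than the explicit dsb sy used in the diff.
inline void DataMemoryBarrier() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
}
```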
@@ -1,7 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#ifdef ARCHITECTURE_x86_64
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
#include "core/arm/dynarmic/arm_exclusive_monitor.h"
#endif
#include "core/arm/exclusive_monitor.h"
@@ -13,7 +13,7 @@ ExclusiveMonitor::~ExclusiveMonitor() = default;

std::unique_ptr<Core::ExclusiveMonitor> MakeExclusiveMonitor(Memory::Memory& memory,
std::size_t num_cores) {
#ifdef ARCHITECTURE_x86_64
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
return std::make_unique<Core::DynarmicExclusiveMonitor>(memory, num_cores);
#else
// TODO(merry): Passthrough exclusive monitor

@@ -133,6 +133,56 @@ struct System::Impl {
: kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}

void Initialize(System& system) {
device_memory = std::make_unique<Core::DeviceMemory>();

is_multicore = Settings::values.use_multi_core.GetValue();
extended_memory_layout = Settings::values.use_extended_memory_layout.GetValue();

core_timing.SetMulticore(is_multicore);
core_timing.Initialize([&system]() { system.RegisterHostThread(); });

const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
const auto current_time =
std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
Settings::values.custom_rtc_differential =
Settings::values.custom_rtc.value_or(current_time) - current_time;

// Create a default fs if one doesn't already exist.
if (virtual_filesystem == nullptr) {
virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
}
if (content_provider == nullptr) {
content_provider = std::make_unique<FileSys::ContentProviderUnion>();
}

// Create default implementations of applets if one is not provided.
applet_manager.SetDefaultAppletsIfMissing();

is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();

kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
}

void ReinitializeIfNecessary(System& system) {
const bool must_reinitialize =
is_multicore != Settings::values.use_multi_core.GetValue() ||
extended_memory_layout != Settings::values.use_extended_memory_layout.GetValue();

if (!must_reinitialize) {
return;
}

LOG_DEBUG(Kernel, "Re-initializing");

is_multicore = Settings::values.use_multi_core.GetValue();
extended_memory_layout = Settings::values.use_extended_memory_layout.GetValue();

Initialize(system);
}
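`Initialize()` above derives `custom_rtc_differential` as the offset between the user-chosen RTC and the current host clock, so the offset can be re-applied to later host readings. A tiny worked example of that arithmetic, with hypothetical values:

```cpp
#include <cstdint>
#include <optional>

// If the user set a custom RTC of 1000 while the host clock reads 1500,
// the stored differential is -500; with no custom RTC it is zero and host
// time passes through unchanged.
constexpr std::int64_t ComputeRtcDifferential(std::optional<std::int64_t> custom_rtc,
                                              std::int64_t current_time) {
    return custom_rtc.value_or(current_time) - current_time;
}

static_assert(ComputeRtcDifferential(std::int64_t{1000}, 1500) == -500);
static_assert(ComputeRtcDifferential(std::nullopt, 1500) == 0);
```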
SystemResultStatus Run() {
std::unique_lock<std::mutex> lk(suspend_guard);
status = SystemResultStatus::Success;
@@ -178,37 +228,14 @@ struct System::Impl {
debugger = std::make_unique<Debugger>(system, port);
}

SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
LOG_DEBUG(Core, "initialized OK");

device_memory = std::make_unique<Core::DeviceMemory>();

is_multicore = Settings::values.use_multi_core.GetValue();
is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();

kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
core_timing.SetMulticore(is_multicore);
// Setting changes may require a full system reinitialization (e.g., disabling multicore).
ReinitializeIfNecessary(system);

kernel.Initialize();
cpu_manager.Initialize();
core_timing.Initialize([&system]() { system.RegisterHostThread(); });

const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
const auto current_time =
std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
Settings::values.custom_rtc_differential =
Settings::values.custom_rtc.value_or(current_time) - current_time;

// Create a default fs if one doesn't already exist.
if (virtual_filesystem == nullptr)
virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
if (content_provider == nullptr)
content_provider = std::make_unique<FileSys::ContentProviderUnion>();

/// Create default implementations of applets if one is not provided.
applet_manager.SetDefaultAppletsIfMissing();

/// Reset all glue registrations
arp_manager.ResetAll();
@@ -253,11 +280,11 @@ struct System::Impl {
return SystemResultStatus::ErrorGetLoader;
}

SystemResultStatus init_result{Init(system, emu_window)};
SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
if (init_result != SystemResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
static_cast<int>(init_result));
Shutdown();
ShutdownMainProcess();
return init_result;
}

@@ -276,7 +303,7 @@ struct System::Impl {
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
Shutdown();
ShutdownMainProcess();

return static_cast<SystemResultStatus>(
static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -335,7 +362,7 @@ struct System::Impl {
return status;
}

void Shutdown() {
void ShutdownMainProcess() {
SetShuttingDown(true);

// Log last frame performance stats if game was loded
@@ -363,13 +390,14 @@ struct System::Impl {
kernel.ShutdownCores();
cpu_manager.Shutdown();
debugger.reset();
services->KillNVNFlinger();
kernel.CloseServices();
services.reset();
service_manager.reset();
cheat_engine.reset();
telemetry_session.reset();
time_manager.Shutdown();
core_timing.Shutdown();
core_timing.ClearPendingEvents();
app_loader.reset();
audio_core.reset();
gpu_core.reset();
@@ -377,7 +405,6 @@ struct System::Impl {
perf_stats.reset();
kernel.Shutdown();
memory.Reset();
applet_manager.ClearAll();

if (auto room_member = room_network.GetRoomMember().lock()) {
Network::GameInfo game_info{};
@@ -500,6 +527,7 @@ struct System::Impl {

bool is_multicore{};
bool is_async_gpu{};
bool extended_memory_layout{};

ExecuteProgramCallback execute_program_callback;
ExitCallback exit_callback;
@@ -520,6 +548,10 @@ const CpuManager& System::GetCpuManager() const {
return impl->cpu_manager;
}

void System::Initialize() {
impl->Initialize(*this);
}

SystemResultStatus System::Run() {
return impl->Run();
}
@@ -540,8 +572,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
}

void System::Shutdown() {
impl->Shutdown();
void System::ShutdownMainProcess() {
impl->ShutdownMainProcess();
}

bool System::IsShuttingDown() const {

@@ -142,6 +142,12 @@ public:
System(System&&) = delete;
System& operator=(System&&) = delete;

/**
* Initializes the system
* This function will initialize core functionaility used for system emulation
*/
void Initialize();

/**
* Run the OS and Application
* This function will start emulation and run the relevant devices
@@ -166,8 +172,8 @@ public:

void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);

/// Shutdown the emulated system.
void Shutdown();
/// Shutdown the main emulated process.
void ShutdownMainProcess();

/// Check if the core is shutting down.
[[nodiscard]] bool IsShuttingDown() const;

@@ -40,7 +40,9 @@ struct CoreTiming::Event {
CoreTiming::CoreTiming()
: clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}

CoreTiming::~CoreTiming() = default;
CoreTiming::~CoreTiming() {
Reset();
}

void CoreTiming::ThreadEntry(CoreTiming& instance) {
constexpr char name[] = "HostTiming";
@@ -53,6 +55,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
}

void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
Reset();
on_thread_init = std::move(on_thread_init_);
event_fifo_id = 0;
shutting_down = false;
@@ -65,17 +68,8 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
}
}

void CoreTiming::Shutdown() {
paused = true;
shutting_down = true;
pause_event.Set();
event.Set();
if (timer_thread) {
timer_thread->join();
}
ClearPendingEvents();
timer_thread.reset();
has_started = false;
void CoreTiming::ClearPendingEvents() {
event_queue.clear();
}

void CoreTiming::Pause(bool is_paused) {
@@ -196,10 +190,6 @@ u64 CoreTiming::GetClockTicks() const {
return CpuCyclesToClockCycles(ticks);
}

void CoreTiming::ClearPendingEvents() {
event_queue.clear();
}

void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
std::scoped_lock lock{basic_lock};

@@ -307,6 +297,18 @@ void CoreTiming::ThreadLoop() {
}
}

void CoreTiming::Reset() {
paused = true;
shutting_down = true;
pause_event.Set();
event.Set();
if (timer_thread) {
timer_thread->join();
}
timer_thread.reset();
has_started = false;
}

std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
if (is_multicore) {
return clock->GetTimeNS();

@@ -61,19 +61,14 @@ public:
/// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
void Initialize(std::function<void()>&& on_thread_init_);

/// Tears down all timing related functionality.
void Shutdown();
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();

/// Sets if emulation is multicore or single core, must be set before Initialize
void SetMulticore(bool is_multicore_) {
is_multicore = is_multicore_;
}

/// Check if it's using host timing.
bool IsHostTiming() const {
return is_multicore;
}

/// Pauses/Unpauses the execution of the timer thread.
void Pause(bool is_paused);

@@ -136,12 +131,11 @@ public:
private:
struct Event;

/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();

static void ThreadEntry(CoreTiming& instance);
void ThreadLoop();

void Reset();

std::unique_ptr<Common::WallClock> clock;

s64 global_timer = 0;
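In the CoreTiming hunks, teardown is funneled into a private `Reset()` that both `Initialize()` and the destructor call, so the timer thread is always joined exactly once per lifecycle and re-initialization is safe. A minimal sketch of that shape, an assumed simplification of the class above:

```cpp
#include <atomic>
#include <optional>
#include <thread>

class TimerOwner {
public:
    ~TimerOwner() {
        Reset(); // safe even when no thread was ever started
    }

    void Initialize() {
        Reset(); // tear down any previous incarnation before restarting
        shutting_down = false;
        worker.emplace([this] { Loop(); });
    }

private:
    void Loop() {
        while (!shutting_down.load()) {
            // ... dispatch timer events ...
        }
    }

    void Reset() {
        shutting_down = true;
        if (worker && worker->joinable()) {
            worker->join();
        }
        worker.reset();
    }

    std::atomic<bool> shutting_down{true};
    std::optional<std::thread> worker;
};
```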
@@ -31,12 +31,14 @@ public:
DramMemoryMap::Base;
}

u8* GetPointer(PAddr addr) {
return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
template <typename T>
T* GetPointer(PAddr addr) {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
}

const u8* GetPointer(PAddr addr) const {
return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
template <typename T>
const T* GetPointer(PAddr addr) const {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
}

Common::HostMemory buffer;
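Templating `GetPointer` moves the `reinterpret_cast` out of every call site and lets callers name the pointee type directly. A generic analogue of the idiom, with hypothetical names:

```cpp
#include <cstdint>

struct Buffer {
    std::uint8_t* base;

    // Centralizes the cast that callers previously wrote themselves.
    template <typename T>
    T* GetPointer(std::uint64_t offset) {
        return reinterpret_cast<T*>(base + offset);
    }
};

// Usage: read a 32-bit word at offset 0x10 without a caller-side cast.
//   std::uint32_t* word = buf.GetPointer<std::uint32_t>(0x10);
```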
@@ -232,8 +232,8 @@ const std::vector<std::shared_ptr<NCA>>& XCI::GetNCAs() const {

std::shared_ptr<NCA> XCI::GetNCAByType(NCAContentType type) const {
const auto program_id = secure_partition->GetProgramTitleID();
const auto iter = std::find_if(
ncas.begin(), ncas.end(), [this, type, program_id](const std::shared_ptr<NCA>& nca) {
const auto iter =
std::find_if(ncas.begin(), ncas.end(), [type, program_id](const std::shared_ptr<NCA>& nca) {
return nca->GetType() == type && nca->GetTitleId() == program_id;
});
return iter == ncas.end() ? nullptr : *iter;

@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/settings.h"
#include "common/string_util.h"
#include "common/swap.h"
#include "core/file_sys/control_metadata.h"
@@ -37,6 +38,27 @@ std::string LanguageEntry::GetDeveloperName() const {
developer_name.size());
}

constexpr std::array<Language, 18> language_to_codes = {{
Language::Japanese,
Language::AmericanEnglish,
Language::French,
Language::German,
Language::Italian,
Language::Spanish,
Language::Chinese,
Language::Korean,
Language::Dutch,
Language::Portuguese,
Language::Russian,
Language::Taiwanese,
Language::BritishEnglish,
Language::CanadianFrench,
Language::LatinAmericanSpanish,
Language::Chinese,
Language::Taiwanese,
Language::BrazilianPortuguese,
}};

NACP::NACP() = default;

NACP::NACP(VirtualFile file) {
@@ -45,9 +67,13 @@ NACP::NACP(VirtualFile file) {

NACP::~NACP() = default;

const LanguageEntry& NACP::GetLanguageEntry(Language language) const {
if (language != Language::Default) {
return raw.language_entries.at(static_cast<u8>(language));
const LanguageEntry& NACP::GetLanguageEntry() const {
Language language = language_to_codes[Settings::values.language_index.GetValue()];

{
const auto& language_entry = raw.language_entries.at(static_cast<u8>(language));
if (!language_entry.GetApplicationName().empty())
return language_entry;
}

for (const auto& language_entry : raw.language_entries) {
@@ -55,16 +81,15 @@ const LanguageEntry& NACP::GetLanguageEntry(Language language) const {
return language_entry;
}

// Fallback to English
return GetLanguageEntry(Language::AmericanEnglish);
return raw.language_entries.at(static_cast<u8>(Language::AmericanEnglish));
}

std::string NACP::GetApplicationName(Language language) const {
return GetLanguageEntry(language).GetApplicationName();
std::string NACP::GetApplicationName() const {
return GetLanguageEntry().GetApplicationName();
}

std::string NACP::GetDeveloperName(Language language) const {
return GetLanguageEntry(language).GetDeveloperName();
std::string NACP::GetDeveloperName() const {
return GetLanguageEntry().GetDeveloperName();
}

u64 NACP::GetTitleId() const {
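`GetLanguageEntry()` now resolves the UI language from settings via `language_to_codes`, then falls back to the first non-empty entry and finally to a fixed default slot. The lookup order can be sketched independently of NACP, with toy types standing in for the real metadata:

```cpp
#include <array>
#include <cstddef>
#include <string>

// Toy stand-in for a per-language metadata entry (assumption).
struct Entry {
    std::string name;
    bool Empty() const { return name.empty(); }
};

// Preference order mirrors the fallback chain above: the configured
// language if populated, otherwise any non-empty entry, otherwise a
// fixed default slot. Assumes preferred and fallback are valid indices.
template <std::size_t N>
const Entry& Pick(const std::array<Entry, N>& entries, std::size_t preferred,
                  std::size_t fallback) {
    if (!entries[preferred].Empty()) {
        return entries[preferred];
    }
    for (const auto& e : entries) {
        if (!e.Empty()) {
            return e;
        }
    }
    return entries[fallback];
}
```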
@@ -101,9 +101,9 @@ public:
explicit NACP(VirtualFile file);
~NACP();

const LanguageEntry& GetLanguageEntry(Language language = Language::Default) const;
std::string GetApplicationName(Language language = Language::Default) const;
std::string GetDeveloperName(Language language = Language::Default) const;
const LanguageEntry& GetLanguageEntry() const;
std::string GetApplicationName() const;
std::string GetDeveloperName() const;
u64 GetTitleId() const;
u64 GetDLCBaseTitleId() const;
std::string GetVersionString() const;

@@ -127,7 +127,7 @@ void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address
}

bool ProgramMetadata::Is64BitProgram() const {
return npdm_header.has_64_bit_instructions;
return npdm_header.has_64_bit_instructions.As<bool>();
}

ProgramAddressSpaceType ProgramMetadata::GetAddressSpaceType() const {

@@ -5,6 +5,7 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/uuid.h"
#include "core/core.h"
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs.h"
@@ -59,6 +60,36 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA
attr.title_id == 0 && attr.save_id == 0);
}

std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id,
u128 user_id) {
// Only detect nand user saves.
const auto space_id_path = [space_id]() -> std::string_view {
switch (space_id) {
case SaveDataSpaceId::NandUser:
return "/user/save";
default:
return "";
}
}();

if (space_id_path.empty()) {
return "";
}

Common::UUID uuid;
std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID));

// Only detect account/device saves from the future location.
switch (type) {
case SaveDataType::SaveData:
return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id);
case SaveDataType::DeviceSaveData:
return fmt::format("{}/device/{:016X}/1", space_id_path, title_id);
default:
return "";
}
}

} // Anonymous namespace

std::string SaveDataAttribute::DebugInfo() const {
@@ -82,7 +113,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
PrintSaveDataAttributeWarnings(meta);

const auto save_directory =
GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);

auto out = dir->CreateDirectoryRelative(save_directory);

@@ -99,7 +130,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
const SaveDataAttribute& meta) const {

const auto save_directory =
GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);

auto out = dir->GetDirectoryRelative(save_directory);

@@ -134,9 +165,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
}
}

std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space,
SaveDataType type, u64 title_id, u128 user_id,
u64 save_id) {
std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
SaveDataSpaceId space, SaveDataType type, u64 title_id,
u128 user_id, u64 save_id) {
// According to switchbrew, if a save is of type SaveData and the title id field is 0, it should
// be interpreted as the title id of the current process.
if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) {
@@ -145,6 +176,17 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s
}
}

// For compat with a future impl.
if (std::string future_path =
GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id);
!future_path.empty()) {
// Check if this location exists, and prefer it over the old.
if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) {
LOG_INFO(Service_FS, "Using save at new location: {}", future_path);
return future_path;
}
}

std::string out = GetSaveDataSpaceIdPath(space);

switch (type) {
@@ -167,7 +209,8 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId s

SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
u128 user_id) const {
const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto path =
GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);

const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME);
@@ -185,7 +228,8 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,

void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
SaveDataSize new_value) const {
const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto path =
GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);

const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME);

@@ -95,8 +95,8 @@ public:
VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;

static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space);
static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type,
u64 title_id, u128 user_id, u64 save_id);
static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space,
SaveDataType type, u64 title_id, u128 user_id, u64 save_id);

SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const;
void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
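`GetFutureSaveDataPath` builds the new-style save location, and `GetFullPath` prefers it only when that directory already exists. For illustration, the `SaveDataType::SaveData` format string above produces a path like the following — the title id and UUID here are hypothetical values, not taken from the source:

```cpp
#include <cstdint>
#include <string>

#include <fmt/format.h>

int main() {
    const std::uint64_t title_id = 0x0100000000010000ULL; // hypothetical
    const std::string uuid = "68469878d50f1b55ef8f3c26e80f8f25"; // hypothetical
    // Mirrors the SaveDataType::SaveData branch above.
    const auto path =
        fmt::format("/user/save/account/{}/{:016X}/1", uuid, title_id);
    // -> "/user/save/account/68469878d50f1b55ef8f3c26e80f8f25/0100000000010000/1"
    return path.empty() ? 1 : 0;
}
```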
@@ -970,14 +970,7 @@ bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue v
Common::Input::VibrationError::None;
}

bool EmulatedController::TestVibration(std::size_t device_index) {
if (device_index >= output_devices.size()) {
return false;
}
if (!output_devices[device_index]) {
return false;
}

bool EmulatedController::IsVibrationEnabled(std::size_t device_index) {
const auto player_index = NpadIdTypeToIndex(npad_id_type);
const auto& player = Settings::values.players.GetValue()[player_index];

@@ -985,31 +978,15 @@ bool EmulatedController::TestVibration(std::size_t device_index) {
return false;
}

const Common::Input::VibrationStatus test_vibration = {
.low_amplitude = 0.001f,
.low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
.high_amplitude = 0.001f,
.high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
.type = Common::Input::VibrationAmplificationType::Test,
};
if (device_index >= output_devices.size()) {
return false;
}

const Common::Input::VibrationStatus zero_vibration = {
.low_amplitude = DEFAULT_VIBRATION_VALUE.low_amplitude,
.low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
.high_amplitude = DEFAULT_VIBRATION_VALUE.high_amplitude,
.high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
.type = Common::Input::VibrationAmplificationType::Test,
};
if (!output_devices[device_index]) {
return false;
}

// Send a slight vibration to test for rumble support
output_devices[device_index]->SetVibration(test_vibration);

// Wait for about 15ms to ensure the controller is ready for the stop command
std::this_thread::sleep_for(std::chrono::milliseconds(15));

// Stop any vibration and return the result
return output_devices[device_index]->SetVibration(zero_vibration) ==
Common::Input::VibrationError::None;
return output_devices[device_index]->IsVibrationEnabled();
}

bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) {
@@ -1048,6 +1025,7 @@ bool EmulatedController::HasNfc() const {
case NpadStyleIndex::JoyconRight:
case NpadStyleIndex::JoyconDual:
case NpadStyleIndex::ProController:
case NpadStyleIndex::Handheld:
break;
default:
return false;
@@ -1158,27 +1136,27 @@ bool EmulatedController::IsControllerSupported(bool use_temporary_value) const {
const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
switch (type) {
case NpadStyleIndex::ProController:
return supported_style_tag.fullkey;
return supported_style_tag.fullkey.As<bool>();
case NpadStyleIndex::Handheld:
return supported_style_tag.handheld;
return supported_style_tag.handheld.As<bool>();
case NpadStyleIndex::JoyconDual:
return supported_style_tag.joycon_dual;
return supported_style_tag.joycon_dual.As<bool>();
case NpadStyleIndex::JoyconLeft:
return supported_style_tag.joycon_left;
return supported_style_tag.joycon_left.As<bool>();
case NpadStyleIndex::JoyconRight:
return supported_style_tag.joycon_right;
return supported_style_tag.joycon_right.As<bool>();
case NpadStyleIndex::GameCube:
return supported_style_tag.gamecube;
return supported_style_tag.gamecube.As<bool>();
case NpadStyleIndex::Pokeball:
return supported_style_tag.palma;
return supported_style_tag.palma.As<bool>();
case NpadStyleIndex::NES:
return supported_style_tag.lark;
return supported_style_tag.lark.As<bool>();
case NpadStyleIndex::SNES:
return supported_style_tag.lucia;
return supported_style_tag.lucia.As<bool>();
case NpadStyleIndex::N64:
return supported_style_tag.lagoon;
return supported_style_tag.lagoon.As<bool>();
case NpadStyleIndex::SegaGenesis:
return supported_style_tag.lager;
return supported_style_tag.lager.As<bool>();
default:
return false;
}
@@ -1234,12 +1212,6 @@ bool EmulatedController::IsConnected(bool get_temporary_value) const {
return is_connected;
}

bool EmulatedController::IsVibrationEnabled() const {
const auto player_index = NpadIdTypeToIndex(npad_id_type);
const auto& player = Settings::values.players.GetValue()[player_index];
return player.vibration_enabled;
}

NpadIdType EmulatedController::GetNpadIdType() const {
std::scoped_lock lock{mutex};
return npad_id_type;
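The `IsControllerSupported` hunk switches from implicit bit-field-to-bool conversion to an explicit `.As<bool>()`, which keeps warnings such as `-Werror=conversion` and MSVC's `/we4800` quiet. The shape of the idiom, using a hypothetical minimal single-bit wrapper (yuzu's real `BitField` is considerably richer):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical minimal single-bit field wrapper (assumption).
template <std::size_t Position>
struct Bit {
    std::uint32_t raw;

    template <typename T>
    T As() const {
        return static_cast<T>((raw >> Position) & 1u);
    }
};

bool IsFullkeySupported(Bit<0> fullkey) {
    // Explicit conversion: no implicit u32 -> bool narrowing for the
    // compiler to flag.
    return fullkey.As<bool>();
}
```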
@@ -206,9 +206,6 @@ public:
*/
bool IsConnected(bool get_temporary_value = false) const;

/// Returns true if vibration is enabled
bool IsVibrationEnabled() const;

/// Removes all callbacks created from input devices
void UnloadInput();

@@ -339,7 +336,7 @@ public:
* Sends a small vibration to the output device
* @return true if SetVibration was successfull
*/
bool TestVibration(std::size_t device_index);
bool IsVibrationEnabled(std::size_t device_index);

/**
* Sets the desired data to be polled from a controller

@@ -14,7 +14,7 @@ enum class CameraAmbientNoiseLevel : u32 {
Low,
Medium,
High,
Unkown3, // This level can't be reached
Unknown3, // This level can't be reached
};

// This is nn::irsensor::CameraLightTarget
@@ -75,9 +75,9 @@ enum class IrCameraStatus : u32 {
enum class IrCameraInternalStatus : u32 {
Stopped,
FirmwareUpdateNeeded,
Unkown2,
Unkown3,
Unkown4,
Unknown2,
Unknown3,
Unknown4,
FirmwareVersionRequested,
FirmwareVersionIsInvalid,
Ready,
@@ -121,20 +121,20 @@ enum class IrSensorFunctionLevel : u8 {

// This is nn::irsensor::MomentProcessorPreprocess
enum class MomentProcessorPreprocess : u32 {
Unkown0,
Unkown1,
Unknown0,
Unknown1,
};

// This is nn::irsensor::PackedMomentProcessorPreprocess
enum class PackedMomentProcessorPreprocess : u8 {
Unkown0,
Unkown1,
Unknown0,
Unknown1,
};

// This is nn::irsensor::PointingStatus
enum class PointingStatus : u32 {
Unkown0,
Unkown1,
Unknown0,
Unknown1,
};

struct IrsRect {

@@ -86,13 +86,13 @@ public:
u32 num_domain_objects{};
const bool always_move_handles{
(static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
if (!ctx.Session()->IsDomain() || always_move_handles) {
if (!ctx.GetManager()->IsDomain() || always_move_handles) {
num_handles_to_move = num_objects_to_move;
} else {
num_domain_objects = num_objects_to_move;
}

if (ctx.Session()->IsDomain()) {
if (ctx.GetManager()->IsDomain()) {
raw_data_size +=
static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
ctx.write_size += num_domain_objects;
@@ -125,7 +125,7 @@ public:
if (!ctx.IsTipc()) {
AlignWithPadding();

if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) {
if (ctx.GetManager()->IsDomain() && ctx.HasDomainMessageHeader()) {
IPC::DomainMessageHeader domain_header{};
domain_header.num_objects = num_domain_objects;
PushRaw(domain_header);
@@ -145,18 +145,18 @@ public:

template <class T>
void PushIpcInterface(std::shared_ptr<T> iface) {
if (context->Session()->IsDomain()) {
if (context->GetManager()->IsDomain()) {
context->AddDomainObject(std::move(iface));
} else {
kernel.CurrentProcess()->GetResourceLimit()->Reserve(
Kernel::LimitableResource::Sessions, 1);
Kernel::LimitableResource::SessionCountMax, 1);

auto* session = Kernel::KSession::Create(kernel);
session->Initialize(nullptr, iface->GetServiceName(),
std::make_shared<Kernel::SessionRequestManager>(kernel));
session->Initialize(nullptr, iface->GetServiceName());
iface->RegisterSession(&session->GetServerSession(),
std::make_shared<Kernel::SessionRequestManager>(kernel));

context->AddMoveObject(&session->GetClientSession());
iface->ClientConnected(&session->GetServerSession());
}
}

@@ -386,7 +386,7 @@ public:

template <class T>
std::weak_ptr<T> PopIpcInterface() {
ASSERT(context->Session()->IsDomain());
ASSERT(context->GetManager()->IsDomain());
ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
return context->GetDomainHandler<T>(Pop<u32>() - 1);
}
@@ -405,7 +405,7 @@ inline s32 RequestParser::Pop() {
}

// Ignore the -Wclass-memaccess warning on memcpy for non-trivially default constructible objects.
#if defined(__GNUC__)
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
@@ -416,7 +416,7 @@ void RequestParser::PopRaw(T& value) {
std::memcpy(&value, cmdbuf + index, sizeof(T));
index += (sizeof(T) + 3) / 4; // round up to word length
}
#if defined(__GNUC__)
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic pop
#endif
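The guard in the last hunk is tightened so the GCC-specific `-Wclass-memaccess` pragma is emitted only under real GCC: Clang and ICC also define `__GNUC__`, and Clang can warn about the unknown warning group. The general pattern in isolation (the `NonTrivial` type is an illustrative assumption):

```cpp
#include <cstring>

struct NonTrivial {
    NonTrivial() {} // user-provided default constructor, per the comment above
    int value;
};

#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
inline void LoadRaw(NonTrivial& out, const unsigned char* src) {
    // Deliberate raw copy into a non-trivially default constructible type.
    std::memcpy(&out, src, sizeof(NonTrivial));
}
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#pragma GCC diagnostic pop
#endif
```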
@@ -8,6 +8,10 @@
namespace Kernel::Board::Nintendo::Nx {

class KSystemControl {
public:
// This can be overridden as needed.
static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB

public:
class Init {
public:

@@ -49,4 +49,26 @@ bool GlobalSchedulerContext::IsLocked() const {
return scheduler_lock.IsLockedByCurrentThread();
}

void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
ASSERT(IsLocked());

woken_dummy_threads.insert(thread);
}

void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
ASSERT(IsLocked());

woken_dummy_threads.erase(thread);
}

void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
ASSERT(IsLocked());

for (auto* thread : woken_dummy_threads) {
thread->DummyThreadEndWait();
}

woken_dummy_threads.clear();
}

} // namespace Kernel

@@ -4,6 +4,7 @@
#pragma once

#include <atomic>
#include <set>
#include <vector>

#include "common/common_types.h"
@@ -58,6 +59,10 @@ public:
/// Returns true if the global scheduler lock is acquired
bool IsLocked() const;

void UnregisterDummyThreadForWakeup(KThread* thread);
void RegisterDummyThreadForWakeup(KThread* thread);
void WakeupWaitingDummyThreads();

[[nodiscard]] LockType& SchedulerLock() {
return scheduler_lock;
}
@@ -76,6 +81,9 @@ private:
KSchedulerPriorityQueue priority_queue;
LockType scheduler_lock;

/// Lists dummy threads pending wakeup on lock release
std::set<KThread*> woken_dummy_threads;

/// Lists all thread ids that aren't deleted/etc.
std::vector<KThread*> thread_list;
std::mutex global_list_guard;
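The new `GlobalSchedulerContext` members implement a deferred-wakeup list: dummy threads are registered under the scheduler lock and woken in one batch when `WakeupWaitingDummyThreads` runs, after which the set is cleared so each registration wakes at most once. The pattern in isolation, with a stand-in thread type:

```cpp
#include <set>

// Stand-in for KThread (assumption).
struct Thread {
    bool awake = false;
    void EndWait() { awake = true; }
};

class DeferredWakeupList {
public:
    void Register(Thread* t) { pending.insert(t); }   // caller holds the scheduler lock
    void Unregister(Thread* t) { pending.erase(t); }  // e.g. the wait was cancelled

    void WakeAll() {
        for (auto* t : pending) {
            t->EndWait();
        }
        pending.clear(); // each registration wakes at most once
    }

private:
    std::set<Thread*> pending;
};
```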
@@ -16,27 +16,39 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/service_thread.h"
#include "core/memory.h"

namespace Kernel {

SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
ServiceThreadType thread_type)
: kernel{kernel_} {
if (thread_type == ServiceThreadType::CreateNew) {
service_thread = kernel.CreateServiceThread(service_name_);
} else {
service_thread = kernel.GetDefaultServiceThread();
}
}
: kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew
? kernel.CreateServiceThread(service_name_)
: kernel.GetDefaultServiceThread()} {}

SessionRequestHandler::~SessionRequestHandler() {
kernel.ReleaseServiceThread(service_thread);
}

void SessionRequestHandler::AcceptSession(KServerPort* server_port) {
auto* server_session = server_port->AcceptSession();
ASSERT(server_session != nullptr);

RegisterSession(server_session, std::make_shared<SessionRequestManager>(kernel));
}

void SessionRequestHandler::RegisterSession(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager) {
manager->SetSessionHandler(shared_from_this());
service_thread.RegisterServerSession(server_session, manager);
server_session->Close();
}

SessionRequestManager::SessionRequestManager(KernelCore& kernel_) : kernel{kernel_} {}

SessionRequestManager::~SessionRequestManager() = default;
@@ -56,15 +68,77 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
}
}

void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->ClientConnected(shared_from_this());
Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session,
HLERequestContext& context) {
Result result = ResultSuccess;

// Ensure our server session is tracked globally.
kernel.RegisterServerObject(session);
// If the session has been converted to a domain, handle the domain request
if (this->HasSessionRequestHandler(context)) {
if (IsDomain() && context.HasDomainMessageHeader()) {
result = HandleDomainSyncRequest(server_session, context);
// If there is no domain header, the regular session handler is used
} else if (this->HasSessionHandler()) {
// If this manager has an associated HLE handler, forward the request to it.
result = this->SessionHandler().HandleSyncRequest(*server_session, context);
}
} else {
ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
IPC::ResponseBuilder rb(context, 2);
rb.Push(ResultSuccess);
}

if (convert_to_domain) {
ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
this->ConvertToDomain();
convert_to_domain = false;
}

return result;
}

void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
session->ClientDisconnected();
Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session,
HLERequestContext& context) {
if (!context.HasDomainMessageHeader()) {
return ResultSuccess;
}

// Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
ASSERT(context.GetManager().get() == this);

// If there is a DomainMessageHeader, then this is CommandType "Request"
const auto& domain_message_header = context.GetDomainMessageHeader();
const u32 object_id{domain_message_header.object_id};
switch (domain_message_header.command) {
case IPC::DomainMessageHeader::CommandType::SendMessage:
if (object_id > this->DomainHandlerCount()) {
LOG_CRITICAL(IPC,
"object_id {} is too big! This probably means a recent service call "
"needed to return a new interface!",
object_id);
ASSERT(false);
return ResultSuccess; // Ignore error if asserts are off
}
if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) {
return strong_ptr->HandleSyncRequest(*server_session, context);
} else {
ASSERT(false);
return ResultSuccess;
}

case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);

this->CloseDomainHandler(object_id - 1);

IPC::ResponseBuilder rb{context, 2};
rb.Push(ResultSuccess);
return ResultSuccess;
}
}

LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
ASSERT(false);
return ResultSuccess;
}

HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
@@ -126,7 +200,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
// Padding to align to 16 bytes
rp.AlignWithPadding();

if (Session()->IsDomain() &&
if (GetManager()->IsDomain() &&
((command_header->type == IPC::CommandType::Request ||
command_header->type == IPC::CommandType::RequestWithContext) ||
!incoming)) {
@@ -135,7 +209,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
if (incoming || domain_message_header) {
domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
} else {
if (Session()->IsDomain()) {
if (GetManager()->IsDomain()) {
LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
}
}
@@ -228,12 +302,11 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.

if (Session()->IsDomain()) {
if (GetManager()->IsDomain()) {
current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
for (const auto& object : outgoing_domain_objects) {
server_session->AppendDomainHandler(object);
cmd_buf[current_offset++] =
static_cast<u32_le>(server_session->NumDomainRequestHandlers());
for (auto& object : outgoing_domain_objects) {
GetManager()->AppendDomainHandler(std::move(object));
cmd_buf[current_offset++] = static_cast<u32_le>(GetManager()->DomainHandlerCount());
}
}
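The rewritten `SessionRequestHandler` constructor exists because `service_thread` becomes a reference (see the header hunk below): a reference member cannot be assigned in the constructor body, so the choice between the two threads moves into the member initializer list as a conditional expression. The idiom in miniature, with stand-in types:

```cpp
struct ServiceThread {};

// Stand-in for KernelCore (assumption).
struct Kernel {
    ServiceThread& CreateServiceThread() { return dedicated; }
    ServiceThread& GetDefaultServiceThread() { return shared; }
    ServiceThread dedicated;
    ServiceThread shared;
};

struct Handler {
    // A reference member must be bound at initialization, hence the
    // conditional in the initializer list rather than if/else in the body.
    Handler(Kernel& k, bool create_new)
        : service_thread{create_new ? k.CreateServiceThread()
                                    : k.GetDefaultServiceThread()} {}

    ServiceThread& service_thread;
};
```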
@@ -45,11 +45,13 @@ class KAutoObject;
class KernelCore;
class KEvent;
class KHandleTable;
class KServerPort;
class KProcess;
class KServerSession;
class KThread;
class KReadableEvent;
class KSession;
class SessionRequestManager;
class ServiceThread;

enum class ThreadWakeupReason;
@@ -76,27 +78,17 @@ public:
virtual Result HandleSyncRequest(Kernel::KServerSession& session,
Kernel::HLERequestContext& context) = 0;

/**
* Signals that a client has just connected to this HLE handler and keeps the
* associated ServerSession alive for the duration of the connection.
* @param server_session Owning pointer to the ServerSession associated with the connection.
*/
void ClientConnected(KServerSession* session);
void AcceptSession(KServerPort* server_port);
void RegisterSession(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager);

/**
* Signals that a client has just disconnected from this HLE handler and releases the
* associated ServerSession.
* @param server_session ServerSession associated with the connection.
*/
void ClientDisconnected(KServerSession* session);

std::weak_ptr<ServiceThread> GetServiceThread() const {
ServiceThread& GetServiceThread() const {
return service_thread;
}

protected:
KernelCore& kernel;
std::weak_ptr<ServiceThread> service_thread;
ServiceThread& service_thread;
};

using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
@@ -121,6 +113,10 @@ public:
is_domain = true;
}

void ConvertToDomainOnRequestEnd() {
convert_to_domain = true;
}

std::size_t DomainHandlerCount() const {
return domain_handlers.size();
}
@@ -158,13 +154,17 @@ public:
session_handler = std::move(handler);
}

std::weak_ptr<ServiceThread> GetServiceThread() const {
ServiceThread& GetServiceThread() const {
return session_handler->GetServiceThread();
}

bool HasSessionRequestHandler(const HLERequestContext& context) const;

Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);

private:
bool convert_to_domain{};
bool is_domain{};
SessionRequestHandlerPtr session_handler;
std::vector<SessionRequestHandlerPtr> domain_handlers;
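`ConvertToDomainOnRequestEnd` defers the domain conversion until the in-flight request has been answered, instead of mutating the session mid-request. A minimal sketch of that two-flag handshake (a hypothetical stand-in type, not the real manager):

```cpp
// Simplified stand-in for SessionRequestManager's deferred domain conversion.
class Manager {
public:
    void ConvertToDomainOnRequestEnd() { convert_to_domain = true; }

    // Called once the current request has been completed.
    void OnRequestEnd() {
        if (convert_to_domain) {
            convert_to_domain = false;
            is_domain = true; // the actual conversion happens here
        }
    }

    bool IsDomain() const { return is_domain; }

private:
    bool convert_to_domain{};
    bool is_domain{};
};
```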
@@ -295,7 +295,7 @@ public:
*/
template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const {
if constexpr (Common::IsSTLContainer<T>) {
if constexpr (Common::IsContiguousContainer<T>) {
using ContiguousType = typename T::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
"Container to WriteBuffer must contain trivially copyable objects");
@@ -341,11 +341,11 @@ public:

template <typename T>
std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock());
return std::static_pointer_cast<T>(GetManager()->DomainHandler(index).lock());
}

void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
manager = std::move(manager_);
manager = manager_;
}

std::string Description() const;
@@ -354,6 +354,10 @@ public:
return *thread;
}

std::shared_ptr<SessionRequestManager> GetManager() const {
return manager.lock();
}

private:
friend class IPC::ResponseBuilder;

@@ -387,7 +391,7 @@ private:
u32 handles_offset{};
u32 domain_offset{};

std::weak_ptr<SessionRequestManager> manager;
std::weak_ptr<SessionRequestManager> manager{};

KernelCore& kernel;
Core::Memory::Memory& memory;
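`WriteBuffer` accepts any contiguous container of trivially copyable elements. A self-contained sketch of the same compile-time guard, writing into a plain byte vector instead of an IPC buffer (hypothetical helper, for illustration only):

```cpp
#include <cstring>
#include <type_traits>
#include <vector>

template <typename T>
std::size_t WriteBufferLike(const T& data, std::vector<unsigned char>& out) {
    using Element = typename T::value_type;
    static_assert(std::is_trivially_copyable_v<Element>,
                  "Container to WriteBuffer must contain trivially copyable objects");
    const std::size_t size = data.size() * sizeof(Element);
    out.resize(size);
    std::memcpy(out.data(), data.data(), size); // contiguous storage assumed
    return size;
}
```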
@@ -10,7 +10,9 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_debug.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_event_info.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_buffer.h"
@@ -18,9 +20,11 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_session_request.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
@@ -34,6 +38,7 @@ namespace Kernel::Init {
HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \
HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \
HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
@@ -42,7 +47,10 @@ namespace Kernel::Init {
HANDLER(KThreadLocalPage, \
(SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
##__VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \
HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)

namespace {

@@ -71,8 +79,20 @@ constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
constexpr size_t SlabCountKIoPool = 1;
constexpr size_t SlabCountKIoRegion = 6;
constexpr size_t SlabcountKSessionRequestMappings = 40;

constexpr size_t SlabCountExtraKThread = 160;
constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread;

namespace test {

static_assert(KernelPageBufferHeapSize ==
2 * PageSize + (SlabCountKProcess + SlabCountKThread +
(SlabCountKProcess + SlabCountKThread) / 8) *
PageSize);
static_assert(KernelPageBufferAdditionalSize ==
(SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize);

} // namespace test

/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
@@ -94,8 +114,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
// TODO(bunnei): Fix this once we support the kernel virtual memory layout.

if (size > 0) {
void* backing_kernel_memory{
system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
TranslateSlabAddrToPhysical(memory_layout, start))};

const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr);
@@ -107,7 +127,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
}

size_t CalculateSlabHeapGapSize() {
constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB;
static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
return KernelSlabHeapGapSize;
}
@@ -132,6 +152,7 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
.num_KDebug = SlabCountKDebug,
.num_KIoPool = SlabCountKIoPool,
.num_KIoRegion = SlabCountKIoRegion,
.num_KSessionRequestMappings = SlabcountKSessionRequestMappings,
};
}

@@ -162,29 +183,6 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
return size;
}

void InitializeKPageBufferSlabHeap(Core::System& system) {
auto& kernel = system.Kernel();

const auto& counts = kernel.SlabResourceCounts();
const size_t num_pages =
counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
const size_t slab_size = num_pages * PageSize;

// Reserve memory from the system resource limit.
ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));

// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
const PAddr slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);

// Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
slab_size);
}

void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();

@@ -256,3 +254,30 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}

} // namespace Kernel::Init

namespace Kernel {

void KPageBufferSlabHeap::Initialize(Core::System& system) {
auto& kernel = system.Kernel();
const auto& counts = kernel.SlabResourceCounts();
const size_t num_pages =
counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
const size_t slab_size = num_pages * PageSize;

// Reserve memory from the system resource limit.
ASSERT(
kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemoryMax, slab_size));

// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
const PAddr slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);

// Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
slab_size);
}

} // namespace Kernel
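The page-buffer slab sizes itself as processes + threads plus a one-eighth safety margin, in pages. A worked instance of that arithmetic with hypothetical counts (the real values come from `KSlabResourceCounts`):

```cpp
#include <cstddef>

constexpr std::size_t PageSizeExample = 0x1000; // 4 KiB, as on the target
constexpr std::size_t NumProcess = 80;          // hypothetical
constexpr std::size_t NumThread = 800;          // hypothetical

// 80 + 800 + (880 / 8) = 990 pages, i.e. 990 * 4 KiB of backing memory.
constexpr std::size_t NumPages = NumProcess + NumThread + (NumProcess + NumThread) / 8;
static_assert(NumPages == 990);
static_assert(NumPages * PageSizeExample == 990 * 0x1000);
```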
@@ -33,11 +33,11 @@ struct KSlabResourceCounts {
size_t num_KDebug;
size_t num_KIoPool;
size_t num_KIoRegion;
size_t num_KSessionRequestMappings;
};

void InitializeSlabResourceCounts(KernelCore& kernel);
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
void InitializeKPageBufferSlabHeap(Core::System& system);
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);

} // namespace Kernel::Init
@@ -16,6 +16,7 @@
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_transfer_memory.h"

@@ -119,4 +120,6 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject,
// static_assert(std::is_final_v<KCodeMemory> &&
// std::is_base_of_v<KAutoObject, KCodeMemory>);

static_assert(std::is_base_of_v<KAutoObject, KSystemResource>);

} // namespace Kernel
@@ -10,6 +10,8 @@ namespace Kernel {

class KAutoObject;

class KSystemResource;

class KClassTokenGenerator {
public:
using TokenBaseType = u16;
@@ -58,7 +60,7 @@ private:
if constexpr (std::is_same<T, KAutoObject>::value) {
static_assert(T::ObjectType == ObjectType::KAutoObject);
return 0;
} else if constexpr (!std::is_final<T>::value) {
} else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) {
static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
T::ObjectType < ObjectType::BaseClassesEnd);
constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
@@ -108,6 +110,8 @@ public:
KSessionRequest,
KCodeMemory,

KSystemResource,

// NOTE: True order for these has not been determined yet.
KAlpha,
KBeta,
@@ -58,11 +58,10 @@ bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
}

Result KClientPort::CreateSession(KClientSession** out,
std::shared_ptr<SessionRequestManager> session_manager) {
Result KClientPort::CreateSession(KClientSession** out) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
LimitableResource::Sessions);
LimitableResource::SessionCountMax);
R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);

// Update the session counts.
@@ -104,7 +103,7 @@ Result KClientPort::CreateSession(KClientSession** out,
}

// Initialize the session.
session->Initialize(this, parent->GetName(), session_manager);
session->Initialize(this, parent->GetName());

// Commit the session reservation.
session_reservation.Commit();

@@ -52,8 +52,7 @@ public:
void Destroy() override;
bool IsSignaled() const override;

Result CreateSession(KClientSession** out,
std::shared_ptr<SessionRequestManager> session_manager = nullptr);
Result CreateSession(KClientSession** out);

private:
std::atomic<s32> num_sessions{};
@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/scope_exit.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
@@ -10,6 +11,8 @@

namespace Kernel {

static constexpr u32 MessageBufferSize = 0x100;

KClientSession::KClientSession(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_} {}
KClientSession::~KClientSession() = default;
@@ -22,8 +25,16 @@ void KClientSession::Destroy() {
void KClientSession::OnServerClosed() {}

Result KClientSession::SendSyncRequest() {
// Signal the server session that new data is available
return parent->GetServerSession().OnRequest();
// Create a session request.
KSessionRequest* request = KSessionRequest::Create(kernel);
R_UNLESS(request != nullptr, ResultOutOfResource);
SCOPE_EXIT({ request->Close(); });

// Initialize the request.
request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);

// Send the request.
return parent->GetServerSession().OnRequest(request);
}

} // namespace Kernel
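`SCOPE_EXIT` guarantees `request->Close()` runs on every path out of `SendSyncRequest`, including the early error returns. A self-contained equivalent of that guard in plain C++ (a toy stand-in for common/scope_exit.h, not the project's macro):

```cpp
#include <cstdio>
#include <utility>

// Runs a callable when the enclosing scope is left, however it is left.
template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F f) : func(std::move(f)) {}
    ~ScopeExit() { func(); }

private:
    F func;
};

int main() {
    std::puts("create request");
    ScopeExit guard([] { std::puts("request->Close()"); }); // always fires
    std::puts("send request"); // even if this path returned early, guard fires
}
```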
@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si

// Clear the memory.
for (const auto& block : m_page_group.Nodes()) {
std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
}

// Set remaining tracking members.
src/core/hle/kernel/k_debug.h (new file, 20 lines)
@@ -0,0 +1,20 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/slab_helpers.h"

namespace Kernel {

class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);

public:
explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}

static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
};

} // namespace Kernel
src/core/hle/kernel/k_dynamic_page_manager.h (new file, 169 lines)
@@ -0,0 +1,169 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <vector>

#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

class KDynamicPageManager {
public:
class PageBuffer {
private:
u8 m_buffer[PageSize];
};
static_assert(sizeof(PageBuffer) == PageSize);

public:
KDynamicPageManager() = default;

template <typename T>
T* GetPointer(VAddr addr) {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}

template <typename T>
const T* GetPointer(VAddr addr) const {
return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
}

Result Initialize(VAddr memory, size_t size, size_t align) {
// We need to have positive size.
R_UNLESS(size > 0, ResultOutOfMemory);
m_backing_memory.resize(size);

// Set addresses.
m_address = memory;
m_aligned_address = Common::AlignDown(memory, align);

// Calculate extents.
const size_t managed_size = m_address + size - m_aligned_address;
const size_t overhead_size = Common::AlignUp(
KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)),
sizeof(PageBuffer));
R_UNLESS(overhead_size < size, ResultOutOfMemory);

// Set tracking fields.
m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer));
m_count = m_size / sizeof(PageBuffer);

// Clear the management region.
u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size);
std::memset(management_ptr, 0, overhead_size);

// Initialize the bitmap.
const size_t allocatable_region_size =
(m_address + size - overhead_size) - m_aligned_address;
ASSERT(allocatable_region_size >= sizeof(PageBuffer));

m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));

// Free the pages to the bitmap.
for (size_t i = 0; i < m_count; i++) {
// Ensure the freed page is all-zero.
std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);

// Set the bit for the free page.
m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) /
sizeof(PageBuffer));
}

R_SUCCEED();
}

VAddr GetAddress() const {
return m_address;
}
size_t GetSize() const {
return m_size;
}
size_t GetUsed() const {
return m_used;
}
size_t GetPeak() const {
return m_peak;
}
size_t GetCount() const {
return m_count;
}

PageBuffer* Allocate() {
// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);

// Find a random free block.
s64 soffset = m_page_bitmap.FindFreeBlock(true);
if (soffset < 0) [[unlikely]] {
return nullptr;
}

const size_t offset = static_cast<size_t>(soffset);

// Update our tracking.
m_page_bitmap.ClearBit(offset);
m_peak = std::max(m_peak, (++m_used));

return GetPointer<PageBuffer>(m_aligned_address) + offset;
}

PageBuffer* Allocate(size_t count) {
// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);

// Find a random free block.
s64 soffset = m_page_bitmap.FindFreeRange(count);
if (soffset < 0) [[unlikely]] {
return nullptr;
}

const size_t offset = static_cast<size_t>(soffset);

// Update our tracking.
m_page_bitmap.ClearRange(offset, count);
m_used += count;
m_peak = std::max(m_peak, m_used);

return GetPointer<PageBuffer>(m_aligned_address) + offset;
}

void Free(PageBuffer* pb) {
// Ensure all pages in the heap are zero.
std::memset(pb, 0, PageSize);

// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);

// Set the bit for the free page.
size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);

// Decrement our used count.
--m_used;
}

private:
KSpinLock m_lock;
KPageBitmap m_page_bitmap;
size_t m_used{};
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
VAddr m_aligned_address{};
size_t m_size{};

// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
std::vector<u8> m_backing_memory;
};

} // namespace Kernel
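`KDynamicPageManager` is a spin-locked pool of page-sized buffers tracked by a free bitmap (set bit = free page). A self-contained miniature of the same idea, using `std::bitset` in place of `KPageBitmap` (illustrative only, not the emulator's type):

```cpp
#include <bitset>
#include <cstddef>
#include <optional>

class TinyPagePool {
public:
    TinyPagePool() { m_free.set(); } // all pages start free

    std::optional<std::size_t> Allocate() {
        for (std::size_t i = 0; i < NumPages; ++i) {
            if (m_free.test(i)) {
                m_free.reset(i); // clear bit = page in use
                return i;
            }
        }
        return std::nullopt; // pool exhausted
    }

    void Free(std::size_t index) { m_free.set(index); } // set bit = free again

private:
    static constexpr std::size_t NumPages = 64;
    std::bitset<NumPages> m_free;
};
```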
src/core/hle/kernel/k_dynamic_resource_manager.h (new file, 61 lines)
@@ -0,0 +1,61 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_group.h"

namespace Kernel {

template <typename T, bool ClearNode = false>
class KDynamicResourceManager {
YUZU_NON_COPYABLE(KDynamicResourceManager);
YUZU_NON_MOVEABLE(KDynamicResourceManager);

public:
using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;

public:
constexpr KDynamicResourceManager() = default;

constexpr size_t GetSize() const {
return m_slab_heap->GetSize();
}
constexpr size_t GetUsed() const {
return m_slab_heap->GetUsed();
}
constexpr size_t GetPeak() const {
return m_slab_heap->GetPeak();
}
constexpr size_t GetCount() const {
return m_slab_heap->GetCount();
}

void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
m_page_allocator = page_allocator;
m_slab_heap = slab_heap;
}

T* Allocate() const {
return m_slab_heap->Allocate(m_page_allocator);
}

void Free(T* t) const {
m_slab_heap->Free(t);
}

private:
KDynamicPageManager* m_page_allocator{};
DynamicSlabType* m_slab_heap{};
};

class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {};
class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};

using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType;
using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;

} // namespace Kernel
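How the three new types compose: the page manager supplies raw pages, the dynamic slab heap carves them into objects, and the resource manager fronts both. A sketch under the APIs shown in these hunks; the address, sizes, and counts are hypothetical, and this only compiles inside the project tree:

```cpp
#include "core/hle/kernel/k_dynamic_page_manager.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"

namespace Kernel {

void ExampleWiring() {
    KDynamicPageManager page_allocator;
    // Hypothetical region: 64 pages of backing memory, page-aligned base.
    (void)page_allocator.Initialize(/*memory=*/0x10000000, /*size=*/64 * PageSize,
                                    /*align=*/PageSize);

    KMemoryBlockSlabHeap slab_heap;
    slab_heap.Initialize(&page_allocator, /*num_objects=*/128);

    KMemoryBlockSlabManager manager;
    manager.Initialize(&page_allocator, &slab_heap);

    // Objects now come from the slab, which refills itself from the page pool.
    KMemoryBlock* block = manager.Allocate();
    manager.Free(block);
}

} // namespace Kernel
```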
src/core/hle/kernel/k_dynamic_slab_heap.h (new file, 122 lines)
@@ -0,0 +1,122 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <atomic>

#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_page_manager.h"
#include "core/hle/kernel/k_slab_heap.h"

namespace Kernel {

template <typename T, bool ClearNode = false>
class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
YUZU_NON_COPYABLE(KDynamicSlabHeap);
YUZU_NON_MOVEABLE(KDynamicSlabHeap);

public:
constexpr KDynamicSlabHeap() = default;

constexpr VAddr GetAddress() const {
return m_address;
}
constexpr size_t GetSize() const {
return m_size;
}
constexpr size_t GetUsed() const {
return m_used.load();
}
constexpr size_t GetPeak() const {
return m_peak.load();
}
constexpr size_t GetCount() const {
return m_count.load();
}

constexpr bool IsInRange(VAddr addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
}

void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
ASSERT(page_allocator != nullptr);

// Initialize members.
m_address = page_allocator->GetAddress();
m_size = page_allocator->GetSize();

// Initialize the base allocator.
KSlabHeapImpl::Initialize();

// Allocate until we have the correct number of objects.
while (m_count.load() < num_objects) {
auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
ASSERT(allocated != nullptr);

for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i);
}

m_count += sizeof(PageBuffer) / sizeof(T);
}
}

T* Allocate(KDynamicPageManager* page_allocator) {
T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());

// If we successfully allocated and we should clear the node, do so.
if constexpr (ClearNode) {
if (allocated != nullptr) [[likely]] {
reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
}
}

// If we fail to allocate, try to get a new page from our next allocator.
if (allocated == nullptr) [[unlikely]] {
if (page_allocator != nullptr) {
allocated = reinterpret_cast<T*>(page_allocator->Allocate());
if (allocated != nullptr) {
// If we succeeded in getting a page, free the rest to our slab.
for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i);
}
m_count += sizeof(PageBuffer) / sizeof(T);
}
}
}

if (allocated != nullptr) [[likely]] {
// Construct the object.
std::construct_at(allocated);

// Update our tracking.
const size_t used = ++m_used;
size_t peak = m_peak.load();
while (peak < used) {
if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
break;
}
}
}

return allocated;
}

void Free(T* t) {
KSlabHeapImpl::Free(t);
--m_used;
}

private:
using PageBuffer = KDynamicPageManager::PageBuffer;

private:
std::atomic<size_t> m_used{};
std::atomic<size_t> m_peak{};
std::atomic<size_t> m_count{};
VAddr m_address{};
size_t m_size{};
};

} // namespace Kernel
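The peak-update loop in `Allocate` above is the standard lock-free atomic-maximum idiom: on failure, `compare_exchange_weak` reloads the observed value, so the loop exits once `m_peak` is at least `used`. A standalone version:

```cpp
#include <atomic>
#include <cstddef>

// Raise `peak` to at least `value` without ever lowering it, lock-free.
void UpdatePeak(std::atomic<std::size_t>& peak, std::size_t value) {
    std::size_t observed = peak.load();
    while (observed < value) {
        // On failure, compare_exchange_weak refreshes `observed`; the loop
        // stops retrying once another thread has stored something >= value.
        if (peak.compare_exchange_weak(observed, value, std::memory_order_relaxed)) {
            break;
        }
    }
}
```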
@@ -50,7 +50,7 @@ Result KEvent::Clear() {
void KEvent::PostDestroy(uintptr_t arg) {
// Release the event count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
owner->GetResourceLimit()->Release(LimitableResource::EventCountMax, 1);
owner->Close();
}
src/core/hle/kernel/k_event_info.h (new file, 64 lines)
@@ -0,0 +1,64 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>

#include <boost/intrusive/list.hpp>

#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"

namespace Kernel {

class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> {
public:
struct InfoCreateThread {
u32 thread_id{};
uintptr_t tls_address{};
};

struct InfoExitProcess {
Svc::ProcessExitReason reason{};
};

struct InfoExitThread {
Svc::ThreadExitReason reason{};
};

struct InfoException {
Svc::DebugException exception_type{};
s32 exception_data_count{};
uintptr_t exception_address{};
std::array<uintptr_t, 4> exception_data{};
};

struct InfoSystemCall {
s64 tick{};
s32 id{};
};

public:
KEventInfo() = default;
~KEventInfo() = default;

public:
Svc::DebugEvent event{};
u32 thread_id{};
u32 flags{};
bool is_attached{};
bool continue_flag{};
bool ignore_continue{};
bool close_once{};
union {
InfoCreateThread create_thread;
InfoExitProcess exit_process;
InfoExitThread exit_thread;
InfoException exception;
InfoSystemCall system_call;
} info{};
KThread* debug_thread{};
};

} // namespace Kernel
@@ -5,14 +5,11 @@

namespace Kernel {

KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;

Result KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

std::swap(m_table_size, saved_table_size);
@@ -25,28 +22,28 @@ Result KHandleTable::Finalize() {
}
}

return ResultSuccess;
R_SUCCEED();
}

bool KHandleTable::Remove(Handle handle) {
// Don't allow removal of a pseudo-handle.
if (Svc::IsPseudoHandle(handle)) {
if (Svc::IsPseudoHandle(handle)) [[unlikely]] {
return false;
}

// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) {
if (handle_pack.reserved != 0) [[unlikely]] {
return false;
}

// Find the object and free the entry.
KAutoObject* obj = nullptr;
{
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

if (this->IsValidHandle(handle)) {
if (this->IsValidHandle(handle)) [[likely]] {
const auto index = handle_pack.index;

obj = m_objects[index];
@@ -57,13 +54,13 @@ bool KHandleTable::Remove(Handle handle) {
}

// Close the object.
kernel.UnregisterInUseObject(obj);
m_kernel.UnregisterInUseObject(obj);
obj->Close();
return true;
}

Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

// Never exceed our capacity.
@@ -82,22 +79,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
*out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
}

return ResultSuccess;
R_SUCCEED();
}

Result KHandleTable::Reserve(Handle* out_handle) {
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

// Never exceed our capacity.
R_UNLESS(m_count < m_table_size, ResultOutOfHandles);

*out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId());
return ResultSuccess;
R_SUCCEED();
}

void KHandleTable::Unreserve(Handle handle) {
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

// Unpack the handle.
@@ -108,7 +105,7 @@ void KHandleTable::Unreserve(Handle handle) {
ASSERT(reserved == 0);
ASSERT(linear_id != 0);

if (index < m_table_size) {
if (index < m_table_size) [[likely]] {
// NOTE: This code does not check the linear id.
ASSERT(m_objects[index] == nullptr);
this->FreeEntry(index);
@@ -116,7 +113,7 @@ void KHandleTable::Unreserve(Handle handle) {
}

void KHandleTable::Register(Handle handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

// Unpack the handle.
@@ -127,7 +124,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) {
ASSERT(reserved == 0);
ASSERT(linear_id != 0);

if (index < m_table_size) {
if (index < m_table_size) [[likely]] {
// Set the entry.
ASSERT(m_objects[index] == nullptr);
@@ -21,33 +21,38 @@ namespace Kernel {
class KernelCore;

class KHandleTable {
public:
YUZU_NON_COPYABLE(KHandleTable);
YUZU_NON_MOVEABLE(KHandleTable);

public:
static constexpr size_t MaxTableSize = 1024;

explicit KHandleTable(KernelCore& kernel_);
~KHandleTable();
public:
explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {}

Result Initialize(s32 size) {
// Check that the table size is valid.
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);

// Lock.
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

// Initialize all fields.
m_max_count = 0;
m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size);
m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;

// Free all entries.
for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) {
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = i - 1;
m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
m_free_head_index = i;
}

return ResultSuccess;
R_SUCCEED();
}

size_t GetTableSize() const {
@@ -66,13 +71,13 @@ public:
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

if constexpr (std::is_same_v<T, KAutoObject>) {
return this->GetObjectImpl(handle);
} else {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
return obj->DynamicCast<T*>();
} else {
return nullptr;
@@ -85,13 +90,13 @@ public:
// Handle pseudo-handles.
if constexpr (std::derived_from<KProcess, T>) {
if (handle == Svc::PseudoHandle::CurrentProcess) {
auto* const cur_process = kernel.CurrentProcess();
auto* const cur_process = m_kernel.CurrentProcess();
ASSERT(cur_process != nullptr);
return cur_process;
}
} else if constexpr (std::derived_from<KThread, T>) {
if (handle == Svc::PseudoHandle::CurrentThread) {
auto* const cur_thread = GetCurrentThreadPointer(kernel);
auto* const cur_thread = GetCurrentThreadPointer(m_kernel);
ASSERT(cur_thread != nullptr);
return cur_thread;
}
@@ -100,6 +105,37 @@ public:
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}

KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

return this->GetObjectImpl(handle);
}

KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const {
// Handle pseudo-handles.
ASSERT(cur_thread != nullptr);
if (handle == Svc::PseudoHandle::CurrentProcess) {
auto* const cur_process =
static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess()));
ASSERT(cur_process != nullptr);
return cur_process;
}
if (handle == Svc::PseudoHandle::CurrentThread) {
return static_cast<KAutoObject*>(cur_thread);
}

return GetObjectForIpcWithoutPseudoHandle(handle);
}

KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const {
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);

return this->GetObjectByIndexImpl(out_handle, index);
}

Result Reserve(Handle* out_handle);
void Unreserve(Handle handle);

@@ -112,7 +148,7 @@ public:
size_t num_opened;
{
// Lock the table.
KScopedDisableDispatch dd(kernel);
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
for (num_opened = 0; num_opened < num_handles; num_opened++) {
// Get the current handle.
@@ -120,13 +156,13 @@ public:

// Get the object for the current handle.
KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
if (cur_object == nullptr) {
if (cur_object == nullptr) [[unlikely]] {
break;
}

// Cast the current object to the desired type.
T* cur_t = cur_object->DynamicCast<T*>();
if (cur_t == nullptr) {
if (cur_t == nullptr) [[unlikely]] {
break;
}

@@ -137,7 +173,7 @@ public:
}

// If we converted every object, succeed.
if (num_opened == num_handles) {
if (num_opened == num_handles) [[likely]] {
return true;
}

@@ -191,21 +227,21 @@ private:
ASSERT(reserved == 0);

// Validate our indexing information.
if (raw_value == 0) {
if (raw_value == 0) [[unlikely]] {
return false;
}
if (linear_id == 0) {
if (linear_id == 0) [[unlikely]] {
return false;
}
if (index >= m_table_size) {
if (index >= m_table_size) [[unlikely]] {
return false;
}

// Check that there's an object, and our serial id is correct.
if (m_objects[index] == nullptr) {
if (m_objects[index] == nullptr) [[unlikely]] {
return false;
}
if (m_entry_infos[index].GetLinearId() != linear_id) {
if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
return false;
}

@@ -215,11 +251,11 @@ private:
KAutoObject* GetObjectImpl(Handle handle) const {
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) {
if (handle_pack.reserved != 0) [[unlikely]] {
return nullptr;
}

if (this->IsValidHandle(handle)) {
if (this->IsValidHandle(handle)) [[likely]] {
return m_objects[handle_pack.index];
} else {
return nullptr;
@@ -227,9 +263,8 @@ private:
}

KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const {

// Index must be in bounds.
if (index >= m_table_size) {
if (index >= m_table_size) [[unlikely]] {
return nullptr;
}

@@ -244,18 +279,15 @@ private:

private:
union HandlePack {
HandlePack() = default;
HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
constexpr HandlePack() = default;
constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}

u32 raw;
u32 raw{};
BitField<0, 15, u32> index;
BitField<15, 15, u32> linear_id;
BitField<30, 2, u32> reserved;
};

static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;

static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
HandlePack handle{};
handle.index.Assign(index);
@@ -264,6 +296,10 @@ private:
return handle.raw;
}

private:
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;

union EntryInfo {
u16 linear_id;
s16 next_free_index;
@@ -271,21 +307,21 @@ private:
constexpr u16 GetLinearId() const {
return linear_id;
}
constexpr s16 GetNextFreeIndex() const {
constexpr s32 GetNextFreeIndex() const {
return next_free_index;
}
};

private:
KernelCore& m_kernel;
std::array<EntryInfo, MaxTableSize> m_entry_infos{};
std::array<KAutoObject*, MaxTableSize> m_objects{};
s32 m_free_head_index{-1};
mutable KSpinLock m_lock;
s32 m_free_head_index{};
u16 m_table_size{};
u16 m_max_count{};
u16 m_next_linear_id{MinLinearId};
u16 m_next_linear_id{};
u16 m_count{};
mutable KSpinLock m_lock;
KernelCore& kernel;
};

} // namespace Kernel
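`HandlePack` packs a 15-bit table index (bits 0-14), a 15-bit linear "generation" id (bits 15-29), and 2 reserved bits (30-31) into one `u32`; `IsValidHandle` rejects a stale handle because its linear id no longer matches the entry's. The same layout expressed with plain shifts (an illustrative model, not the `BitField` type):

```cpp
#include <cstdint>

constexpr std::uint32_t EncodeHandle(std::uint16_t index, std::uint16_t linear_id) {
    return (static_cast<std::uint32_t>(index) & 0x7FFF) |
           ((static_cast<std::uint32_t>(linear_id) & 0x7FFF) << 15);
}

constexpr std::uint16_t HandleIndex(std::uint32_t handle) {
    return static_cast<std::uint16_t>(handle & 0x7FFF);
}

constexpr std::uint16_t HandleLinearId(std::uint32_t handle) {
    return static_cast<std::uint16_t>((handle >> 15) & 0x7FFF);
}

static_assert(HandleIndex(EncodeHandle(42, 7)) == 42);
static_assert(HandleLinearId(EncodeHandle(42, 7)) == 7);
```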
@@ -11,29 +11,34 @@
namespace Kernel::KInterruptManager {

void HandleInterrupt(KernelCore& kernel, s32 core_id) {
auto* process = kernel.CurrentProcess();
if (!process) {
return;
}

// Acknowledge the interrupt.
kernel.PhysicalCore(core_id).ClearInterrupt();

auto& current_thread = GetCurrentThread(kernel);

// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
KScopedSchedulerLock sl{kernel};
if (auto* process = kernel.CurrentProcess(); process) {
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
KScopedSchedulerLock sl{kernel};

// Pin the current thread.
process->PinCurrentThread(core_id);
// Pin the current thread.
process->PinCurrentThread(core_id);

// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
}
}

// Request interrupt scheduling.
kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}

void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
if (core_mask & (1ULL << core_id)) {
kernel.PhysicalCore(core_id).Interrupt();
}
}
}

} // namespace Kernel::KInterruptManager

@@ -11,6 +11,8 @@ class KernelCore;

namespace KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id);
}
void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);

} // namespace KInterruptManager

} // namespace Kernel
@@ -16,6 +16,7 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>,
public KSlabAllocated<KLinkedListNode> {

public:
explicit KLinkedListNode(KernelCore&) {}
KLinkedListNode() = default;

void Initialize(void* it) {
@@ -6,6 +6,7 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"

@@ -34,26 +35,32 @@ enum class KMemoryState : u32 {
FlagCanMapProcess = (1 << 23),
FlagCanChangeAttribute = (1 << 24),
FlagCanCodeMemory = (1 << 25),
FlagLinearMapped = (1 << 26),

FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
FlagReferenceCounted | FlagCanChangeAttribute,
FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped,

FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagReferenceCounted,
FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped,

FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,
FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap |
FlagLinearMapped,

Free = static_cast<u32>(Svc::MemoryState::Free),
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,

// Alias was removed after 1.0.0.

AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
FlagCanCodeAlias,
@@ -66,18 +73,18 @@ enum class KMemoryState : u32 {
Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

ThreadLocal =
static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,

Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,

SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc |
FlagCanUseNonDeviceIpc,

Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),

@@ -90,69 +97,69 @@ enum class KMemoryState : u32 {
Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,

GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
FlagReferenceCounted | FlagCanDebug,
CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,

Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped,

Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);

static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);

static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);

enum class KMemoryPermission : u8 {
None = 0,
All = static_cast<u8>(~None),

Read = 1 << 0,
Write = 1 << 1,
Execute = 1 << 2,

ReadAndWrite = Read | Write,
ReadAndExecute = Read | Execute,

UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),

KernelShift = 3,

KernelRead = Read << KernelShift,
KernelWrite = Write << KernelShift,
KernelExecute = Execute << KernelShift,
KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << KernelShift,
KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift,
KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift,

NotMapped = (1 << (2 * KernelShift)),

KernelReadWrite = KernelRead | KernelWrite,
KernelReadExecute = KernelRead | KernelExecute,

UserRead = Read | KernelRead,
UserWrite = Write | KernelWrite,
UserExecute = Execute,
UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead,
UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite,
UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute),

UserReadWrite = UserRead | UserWrite,
UserReadExecute = UserRead | UserExecute,

IpcLockChangeMask = NotMapped | UserReadWrite
UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),

IpcLockChangeMask = NotMapped | UserReadWrite,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
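The permission enum keeps user bits in positions 0-2 and mirrors them into kernel bits by shifting left by `KernelShift`, so a user-visible permission implies the matching kernel-visible one. A worked instance of that packing (standalone, with the same bit values):

```cpp
#include <cstdint>

constexpr std::uint8_t Read = 1 << 0;
constexpr std::uint8_t Write = 1 << 1;
constexpr int KernelShift = 3;

constexpr std::uint8_t KernelRead = Read << KernelShift;   // 0b0000'1000
constexpr std::uint8_t KernelWrite = Write << KernelShift; // 0b0001'0000
constexpr std::uint8_t UserRead = Read | KernelRead;       // user read implies kernel read
constexpr std::uint8_t UserReadWrite = UserRead | Write | KernelWrite;

static_assert(UserReadWrite == 0b0001'1011);
```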
@@ -168,9 +175,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
|
||||
|
||||
enum class KMemoryAttribute : u8 {
|
||||
None = 0x00,
|
||||
Mask = 0x7F,
|
||||
All = Mask,
|
||||
DontCareMask = 0x80,
|
||||
All = 0xFF,
|
||||
UserMask = All,
|
||||
|
||||
Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
|
||||
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
|
||||
@@ -178,76 +184,114 @@ enum class KMemoryAttribute : u8 {
|
||||
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
|
||||
|
||||
SetMask = Uncached,
|
||||
|
||||
IpcAndDeviceMapped = IpcLocked | DeviceShared,
|
||||
LockedAndIpcLocked = Locked | IpcLocked,
|
||||
DeviceSharedAndUncached = DeviceShared | Uncached
|
||||
};
|
||||
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
|
||||
|
||||
static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
|
||||
static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);
|
||||
enum class KMemoryBlockDisableMergeAttribute : u8 {
|
||||
None = 0,
|
||||
Normal = (1u << 0),
|
||||
DeviceLeft = (1u << 1),
|
||||
IpcLeft = (1u << 2),
|
||||
Locked = (1u << 3),
|
||||
DeviceRight = (1u << 4),
|
||||
|
||||
AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
|
||||
AllRight = DeviceRight,
|
||||
};
|
||||
DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
|
||||
|
||||
struct KMemoryInfo {
    VAddr addr{};
    std::size_t size{};
    KMemoryState state{};
    KMemoryPermission perm{};
    KMemoryAttribute attribute{};
    KMemoryPermission original_perm{};
    u16 ipc_lock_count{};
    u16 device_use_count{};
    uintptr_t m_address;
    size_t m_size;
    KMemoryState m_state;
    u16 m_device_disable_merge_left_count;
    u16 m_device_disable_merge_right_count;
    u16 m_ipc_lock_count;
    u16 m_device_use_count;
    u16 m_ipc_disable_merge_count;
    KMemoryPermission m_permission;
    KMemoryAttribute m_attribute;
    KMemoryPermission m_original_permission;
    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;

    constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
        return {
            addr,
            size,
            static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
            static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
            static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
            ipc_lock_count,
            device_use_count,
            .base_address = m_address,
            .size = m_size,
            .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
            .attribute =
                static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
            .permission =
                static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
            .ipc_count = m_ipc_lock_count,
            .device_count = m_device_use_count,
            .padding = {},
        };
    }

    constexpr VAddr GetAddress() const {
        return addr;
    constexpr uintptr_t GetAddress() const {
        return m_address;
    }
    constexpr std::size_t GetSize() const {
        return size;

    constexpr size_t GetSize() const {
        return m_size;
    }
    constexpr std::size_t GetNumPages() const {
        return GetSize() / PageSize;

    constexpr size_t GetNumPages() const {
        return this->GetSize() / PageSize;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();

    constexpr uintptr_t GetEndAddress() const {
        return this->GetAddress() + this->GetSize();
    }
    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;

    constexpr uintptr_t GetLastAddress() const {
        return this->GetEndAddress() - 1;
    }

    constexpr u16 GetIpcLockCount() const {
        return m_ipc_lock_count;
    }

    constexpr u16 GetIpcDisableMergeCount() const {
        return m_ipc_disable_merge_count;
    }

    constexpr KMemoryState GetState() const {
        return state;
    }
    constexpr KMemoryAttribute GetAttribute() const {
        return attribute;
        return m_state;
    }

    constexpr KMemoryPermission GetPermission() const {
        return perm;
        return m_permission;
    }

    constexpr KMemoryPermission GetOriginalPermission() const {
        return m_original_permission;
    }

    constexpr KMemoryAttribute GetAttribute() const {
        return m_attribute;
    }

    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
        return m_disable_merge_attribute;
    }
};

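The new GetSvcMemoryInfo() switches from positional to designated initializers when filling the SVC-facing struct. A compact sketch of why that pattern is safer, using stand-in types rather than the real Svc::MemoryInfo:

// Sketch of the designated-initializer conversion pattern (types assumed).
#include <cstdint>

struct SvcInfo {
    std::uint64_t base_address;
    std::uint64_t size;
    std::uint32_t state;
    std::uint16_t ipc_count;
    std::uint16_t device_count;
};

struct Block {
    std::uint64_t m_address;
    std::uint64_t m_size;
    std::uint32_t m_state;
    std::uint16_t m_ipc_lock_count;
    std::uint16_t m_device_use_count;

    // Designated initializers keep the field mapping explicit, so adding or
    // reordering members in SvcInfo cannot silently misassign values.
    constexpr SvcInfo ToSvc() const {
        return {
            .base_address = m_address,
            .size = m_size,
            .state = m_state,
            .ipc_count = m_ipc_lock_count,
            .device_count = m_device_use_count,
        };
    }
};
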
class KMemoryBlock final {
    friend class KMemoryBlockManager;

class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
private:
    VAddr addr{};
    std::size_t num_pages{};
    KMemoryState state{KMemoryState::None};
    u16 ipc_lock_count{};
    u16 device_use_count{};
    KMemoryPermission perm{KMemoryPermission::None};
    KMemoryPermission original_perm{KMemoryPermission::None};
    KMemoryAttribute attribute{KMemoryAttribute::None};
    u16 m_device_disable_merge_left_count;
    u16 m_device_disable_merge_right_count;
    VAddr m_address;
    size_t m_num_pages;
    KMemoryState m_memory_state;
    u16 m_ipc_lock_count;
    u16 m_device_use_count;
    u16 m_ipc_disable_merge_count;
    KMemoryPermission m_permission;
    KMemoryPermission m_original_permission;
    KMemoryAttribute m_attribute;
    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;

public:
    static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -261,113 +305,361 @@ public:
    }

public:
    constexpr KMemoryBlock() = default;
    constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
                           KMemoryPermission perm_, KMemoryAttribute attribute_)
        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}

    constexpr VAddr GetAddress() const {
        return addr;
        return m_address;
    }

    constexpr std::size_t GetNumPages() const {
        return num_pages;
    constexpr size_t GetNumPages() const {
        return m_num_pages;
    }

    constexpr std::size_t GetSize() const {
        return GetNumPages() * PageSize;
    constexpr size_t GetSize() const {
        return this->GetNumPages() * PageSize;
    }

    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
        return this->GetAddress() + this->GetSize();
    }

    constexpr VAddr GetLastAddress() const {
        return GetEndAddress() - 1;
        return this->GetEndAddress() - 1;
    }

    constexpr u16 GetIpcLockCount() const {
        return m_ipc_lock_count;
    }

    constexpr u16 GetIpcDisableMergeCount() const {
        return m_ipc_disable_merge_count;
    }

    constexpr KMemoryPermission GetPermission() const {
        return m_permission;
    }

    constexpr KMemoryPermission GetOriginalPermission() const {
        return m_original_permission;
    }

    constexpr KMemoryAttribute GetAttribute() const {
        return m_attribute;
    }

    constexpr KMemoryInfo GetMemoryInfo() const {
        return {
            GetAddress(), GetSize(), state, perm,
            attribute, original_perm, ipc_lock_count, device_use_count,
            .m_address = this->GetAddress(),
            .m_size = this->GetSize(),
            .m_state = m_memory_state,
            .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
            .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
            .m_ipc_lock_count = m_ipc_lock_count,
            .m_device_use_count = m_device_use_count,
            .m_ipc_disable_merge_count = m_ipc_disable_merge_count,
            .m_permission = m_permission,
            .m_attribute = m_attribute,
            .m_original_permission = m_original_permission,
            .m_disable_merge_attribute = m_disable_merge_attribute,
        };
    }

    void ShareToDevice(KMemoryPermission /*new_perm*/) {
        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
               device_use_count == 0);
        attribute |= KMemoryAttribute::DeviceShared;
        const u16 new_use_count{++device_use_count};
        ASSERT(new_use_count > 0);
public:
    explicit KMemoryBlock() = default;

    constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
                           KMemoryAttribute attr)
        : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
          m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
          m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
          m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
          m_original_permission(KMemoryPermission::None), m_attribute(attr),
          m_disable_merge_attribute() {}

    constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
                              KMemoryAttribute attr) {
        m_device_disable_merge_left_count = 0;
        m_device_disable_merge_right_count = 0;
        m_address = addr;
        m_num_pages = np;
        m_memory_state = ms;
        m_ipc_lock_count = 0;
        m_device_use_count = 0;
        m_permission = p;
        m_original_permission = KMemoryPermission::None;
        m_attribute = attr;
        m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
    }

    void UnshareToDevice(KMemoryPermission /*new_perm*/) {
        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
        const u16 prev_use_count{device_use_count--};
        ASSERT(prev_use_count > 0);
        if (prev_use_count == 1) {
            attribute &= ~KMemoryAttribute::DeviceShared;
        }
    }

private:
    constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
        constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
                                                       KMemoryAttribute::IpcLocked |
                                                       KMemoryAttribute::DeviceShared};
        return state == s && perm == p &&
               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
        constexpr auto AttributeIgnoreMask =
            KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
        return m_memory_state == s && m_permission == p &&
               (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
    }

    constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
               device_use_count == rhs.device_use_count;
        return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
               m_original_permission == rhs.m_original_permission &&
               m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
               m_device_use_count == rhs.m_device_use_count;
    }

    constexpr bool Contains(VAddr start) const {
        return GetAddress() <= start && start <= GetEndAddress();
    constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
        return this->HasSameProperties(rhs) &&
               (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
                   KMemoryBlockDisableMergeAttribute::None &&
               (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
                   KMemoryBlockDisableMergeAttribute::None;
    }

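CanMergeWith() gates coalescing on two conditions: the blocks must have identical properties, and neither block may have a disable-merge bit set on the edge that would be fused. A toy sketch of that gating, with simplified stand-in types:

// Toy sketch of the merge gating; fields are stand-ins for KMemoryBlock's.
#include <cstdint>

enum DisableMerge : std::uint8_t {
    None = 0,
    Normal = 1 << 0,
    DeviceLeft = 1 << 1,
    IpcLeft = 1 << 2,
    Locked = 1 << 3,
    DeviceRight = 1 << 4,
    AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
    AllRight = DeviceRight,
};

struct Block {
    int props;                  // stand-in for state/permission/attribute
    std::uint8_t disable_merge;
};

// lhs is the lower-addressed block; it may merge with rhs only when lhs's
// right-edge bits and rhs's left-edge bits are both clear.
constexpr bool CanMerge(const Block& lhs, const Block& rhs) {
    return lhs.props == rhs.props &&
           (lhs.disable_merge & AllRight) == 0 &&
           (rhs.disable_merge & AllLeft) == 0;
}

static_assert(CanMerge({1, None}, {1, None}));
static_assert(!CanMerge({1, None}, {1, IpcLeft}));
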
    constexpr void Add(std::size_t count) {
        ASSERT(count > 0);
        ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);

        num_pages += count;
    constexpr bool Contains(VAddr addr) const {
        return this->GetAddress() <= addr && addr <= this->GetEndAddress();
    }

    constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
                          KMemoryAttribute new_attribute) {
        ASSERT(original_perm == KMemoryPermission::None);
        ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
    constexpr void Add(const KMemoryBlock& added_block) {
        ASSERT(added_block.GetNumPages() > 0);
        ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
               this->GetEndAddress() + added_block.GetSize() - 1);

        state = new_state;
        perm = new_perm;

        attribute = static_cast<KMemoryAttribute>(
            new_attribute |
            (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
        m_num_pages += added_block.GetNumPages();
        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
            m_disable_merge_attribute | added_block.m_disable_merge_attribute);
        m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
    }

    constexpr KMemoryBlock Split(VAddr split_addr) {
        ASSERT(GetAddress() < split_addr);
        ASSERT(Contains(split_addr));
        ASSERT(Common::IsAligned(split_addr, PageSize));
    constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
                          bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
        ASSERT(m_original_permission == KMemoryPermission::None);
        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);

        KMemoryBlock block;
        block.addr = addr;
        block.num_pages = (split_addr - GetAddress()) / PageSize;
        block.state = state;
        block.ipc_lock_count = ipc_lock_count;
        block.device_use_count = device_use_count;
        block.perm = perm;
        block.original_perm = original_perm;
        block.attribute = attribute;
        m_memory_state = s;
        m_permission = p;
        m_attribute = static_cast<KMemoryAttribute>(
            a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));

        addr = split_addr;
        num_pages -= block.num_pages;
        if (set_disable_merge_attr && set_mask != 0) {
            m_disable_merge_attribute = m_disable_merge_attribute |
                                        static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
        }
        if (clear_mask != 0) {
            m_disable_merge_attribute = m_disable_merge_attribute &
                                        static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
        }
    }

        return block;
    constexpr void Split(KMemoryBlock* block, VAddr addr) {
        ASSERT(this->GetAddress() < addr);
        ASSERT(this->Contains(addr));
        ASSERT(Common::IsAligned(addr, PageSize));

        block->m_address = m_address;
        block->m_num_pages = (addr - this->GetAddress()) / PageSize;
        block->m_memory_state = m_memory_state;
        block->m_ipc_lock_count = m_ipc_lock_count;
        block->m_device_use_count = m_device_use_count;
        block->m_permission = m_permission;
        block->m_original_permission = m_original_permission;
        block->m_attribute = m_attribute;
        block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
        block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
        block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
        block->m_device_disable_merge_right_count = 0;

        m_address = addr;
        m_num_pages -= block->m_num_pages;

        m_ipc_disable_merge_count = 0;
        m_device_disable_merge_left_count = 0;
        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
    }

    constexpr void UpdateDeviceDisableMergeStateForShareLeft(
        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
        // New permission/right aren't used.
        if (left) {
            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
            const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
            ASSERT(new_device_disable_merge_left_count > 0);
        }
    }

    constexpr void UpdateDeviceDisableMergeStateForShareRight(
        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
        // New permission/left aren't used.
        if (right) {
            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
            const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
            ASSERT(new_device_disable_merge_right_count > 0);
        }
    }

    constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
                                                         bool right) {
        this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
        this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
    }

    constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                 bool right) {
        // New permission isn't used.

        // We must either be shared or have a zero lock count.
        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
               m_device_use_count == 0);

        // Share.
        const u16 new_count = ++m_device_use_count;
        ASSERT(new_count > 0);

        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);

        this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
    }

    constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
        // New permission/right aren't used.

        if (left) {
            if (!m_device_disable_merge_left_count) {
                return;
            }
            --m_device_disable_merge_left_count;
        }

        m_device_disable_merge_left_count =
            std::min(m_device_disable_merge_left_count, m_device_use_count);

        if (m_device_disable_merge_left_count == 0) {
            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
        }
    }

    constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
        // New permission/left aren't used.

        if (right) {
            const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
            ASSERT(old_device_disable_merge_right_count > 0);
            if (old_device_disable_merge_right_count == 1) {
                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
            }
        }
    }

    constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
                                                           bool right) {
        this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
    }

    constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                   bool right) {
        // New permission isn't used.

        // We must be shared.
        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);

        // Unshare.
        const u16 old_count = m_device_use_count--;
        ASSERT(old_count > 0);

        if (old_count == 1) {
            m_attribute =
                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
        }

        this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
    }

    constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                        bool right) {
        // New permission isn't used.

        // We must be shared.
        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);

        // Unshare.
        const u16 old_count = m_device_use_count--;
        ASSERT(old_count > 0);

        if (old_count == 1) {
            m_attribute =
                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
        }

        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
    }

    constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
        // We must either be locked or have a zero lock count.
        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
               m_ipc_lock_count == 0);

        // Lock.
        const u16 new_lock_count = ++m_ipc_lock_count;
        ASSERT(new_lock_count > 0);

        // If this is our first lock, update our permissions.
        if (new_lock_count == 1) {
            ASSERT(m_original_permission == KMemoryPermission::None);
            ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
                   (m_permission | KMemoryPermission::NotMapped));
            ASSERT((m_permission & KMemoryPermission::UserExecute) !=
                       KMemoryPermission::UserExecute ||
                   (new_perm == KMemoryPermission::UserRead));
            m_original_permission = m_permission;
            m_permission = static_cast<KMemoryPermission>(
                (new_perm & KMemoryPermission::IpcLockChangeMask) |
                (m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
        }
        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);

        if (left) {
            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
            const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
            ASSERT(new_ipc_disable_merge_count > 0);
        }
    }

    constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
                                [[maybe_unused]] bool right) {
        // New permission isn't used.

        // We must be locked.
        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);

        // Unlock.
        const u16 old_lock_count = m_ipc_lock_count--;
        ASSERT(old_lock_count > 0);

        // If this is our last unlock, update our permissions.
        if (old_lock_count == 1) {
            ASSERT(m_original_permission != KMemoryPermission::None);
            m_permission = m_original_permission;
            m_original_permission = KMemoryPermission::None;
            m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
        }

        if (left) {
            const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
            ASSERT(old_ipc_disable_merge_count > 0);
            if (old_ipc_disable_merge_count == 1) {
                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
            }
        }
    }

    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
        return m_disable_merge_attribute;
    }
};
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);

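The new Split() carves the lower part of a block into a caller-supplied node. Note how the edge bookkeeping travels: the new lower node inherits the left-edge merge state, the original keeps the right-edge state, and the freshly created interior edge is clear on both sides. A toy sketch of that invariant, with a simplified field set:

// Toy sketch of Split() edge bookkeeping; PageSize and fields are simplified.
#include <cassert>
#include <cstdint>

constexpr std::uint64_t kPageSize = 0x1000;

struct Block {
    std::uint64_t address;
    std::uint64_t num_pages;
    std::uint8_t left_edge_state;   // stand-in for the AllLeft bits
    std::uint8_t right_edge_state;  // stand-in for the AllRight bits

    void Split(Block* lower, std::uint64_t split_addr) {
        assert(address < split_addr);
        assert(split_addr % kPageSize == 0);

        lower->address = address;
        lower->num_pages = (split_addr - address) / kPageSize;
        lower->left_edge_state = left_edge_state;  // moves with the lower half
        lower->right_edge_state = 0;               // interior edge, nothing disabled

        address = split_addr;
        num_pages -= lower->num_pages;
        left_edge_state = 0;                       // interior edge on our side too
    }
};
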
@@ -2,221 +2,336 @@
// SPDX-License-Identifier: GPL-2.0-or-later

#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/memory_types.h"

namespace Kernel {

KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
    : start_addr{start_addr_}, end_addr{end_addr_} {
    const u64 num_pages{(end_addr - start_addr) / PageSize};
    memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
                                   KMemoryPermission::None, KMemoryAttribute::None);
KMemoryBlockManager::KMemoryBlockManager() = default;

Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
    // Allocate a block to encapsulate the address space, insert it into the tree.
    KMemoryBlock* start_block = slab_manager->Allocate();
    R_UNLESS(start_block != nullptr, ResultOutOfResource);

    // Set our start and end.
    m_start_address = st;
    m_end_address = nd;
    ASSERT(Common::IsAligned(m_start_address, PageSize));
    ASSERT(Common::IsAligned(m_end_address, PageSize));

    // Initialize and insert the block.
    start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
                            KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
    m_memory_block_tree.insert(*start_block);

    R_SUCCEED();
}

KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
    auto node{memory_block_tree.begin()};
    while (node != end()) {
        const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
        if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
            return node;
        }
        node = std::next(node);
void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
                                   HostUnmapCallback&& host_unmap_callback) {
    // Erase every block until we have none left.
    auto it = m_memory_block_tree.begin();
    while (it != m_memory_block_tree.end()) {
        KMemoryBlock* block = std::addressof(*it);
        it = m_memory_block_tree.erase(it);
        slab_manager->Free(block);
        host_unmap_callback(block->GetAddress(), block->GetSize());
    }
    return end();

    ASSERT(m_memory_block_tree.empty());
}

VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                                        std::size_t num_pages, std::size_t align,
                                        std::size_t offset, std::size_t guard_pages) {
    if (num_pages == 0) {
        return {};
    }
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
                                        size_t num_pages, size_t alignment, size_t offset,
                                        size_t guard_pages) const {
    if (num_pages > 0) {
        const VAddr region_end = region_start + region_num_pages * PageSize;
        const VAddr region_last = region_end - 1;
        for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
             it++) {
            const KMemoryInfo info = it->GetMemoryInfo();
            if (region_last < info.GetAddress()) {
                break;
            }
            if (info.m_state != KMemoryState::Free) {
                continue;
            }

    const VAddr region_end{region_start + region_num_pages * PageSize};
    const VAddr region_last{region_end - 1};
    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
        const auto info{it->GetMemoryInfo()};
        if (region_last < info.GetAddress()) {
            break;
        }
            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
            area += guard_pages * PageSize;

        if (info.state != KMemoryState::Free) {
            continue;
        }
            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
            area = (area <= offset_area) ? offset_area : offset_area + alignment;

        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
        area += guard_pages * PageSize;
            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
            const VAddr area_last = area_end - 1;

        const VAddr offset_area{Common::AlignDown(area, align) + offset};
        area = (area <= offset_area) ? offset_area : offset_area + align;

        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
        const VAddr area_last{area_end - 1};

        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
            area_last <= info.GetLastAddress()) {
            return area;
            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
                area_last <= info.GetLastAddress()) {
                return area;
            }
        }
    }

    return {};
}

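The alignment step inside FindFreeArea() is easy to misread: the candidate address is aligned down, offset, and then bumped up one alignment unit if that landed below the candidate. A small self-contained sketch of just that computation, with illustrative values:

// Sketch of FindFreeArea()'s alignment math; values are illustrative only.
#include <cassert>
#include <cstdint>

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value - (value % align);
}

constexpr std::uint64_t PlaceAligned(std::uint64_t area, std::uint64_t alignment,
                                     std::uint64_t offset) {
    const std::uint64_t offset_area = AlignDown(area, alignment) + offset;
    // If aligning down moved us before the candidate, advance one unit.
    return (area <= offset_area) ? offset_area : offset_area + alignment;
}

int main() {
    // 0x1234 aligned to 0x1000 with offset 0 must round up to 0x2000.
    assert(PlaceAligned(0x1234, 0x1000, 0) == 0x2000);
    // An already-aligned candidate stays put.
    assert(PlaceAligned(0x2000, 0x1000, 0) == 0x2000);
}
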
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
                                 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
                                 KMemoryState state, KMemoryPermission perm,
                                 KMemoryAttribute attribute) {
    const VAddr update_end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};
void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
                                            VAddr address, size_t num_pages) {
    // Find the iterator now that we've updated.
    iterator it = this->FindIterator(address);
    if (address != m_start_address) {
        it--;
    }

    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;

    while (node != memory_block_tree.end()) {
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < update_end_addr) {
            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
                node = next_node;
                continue;
            }

            iterator new_node{node};
            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (update_end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        }

        if (cur_end_addr - 1 >= update_end_addr - 1) {
    // Coalesce blocks that we can.
    while (true) {
        iterator prev = it++;
        if (it == m_memory_block_tree.end()) {
            break;
        }

        node = next_node;
    }
}

void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
                                 KMemoryPermission perm, KMemoryAttribute attribute) {
    const VAddr update_end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

        if (addr < cur_end_addr && cur_addr < update_end_addr) {
            iterator new_node{node};

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }

            if (update_end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
            }

            new_node->Update(state, perm, attribute);

            MergeAdjacent(new_node, next_node);
        if (prev->CanMergeWith(*it)) {
            KMemoryBlock* block = std::addressof(*it);
            m_memory_block_tree.erase(it);
            prev->Add(*block);
            allocator->Free(block);
            it = prev;
        }

        if (cur_end_addr - 1 >= update_end_addr - 1) {
        if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
            break;
        }

        node = next_node;
    }
}

void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
                                 KMemoryAttribute attr,
                                 KMemoryBlockDisableMergeAttribute set_disable_attr,
                                 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
           KMemoryAttribute::None);

    VAddr cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();
        if (it->HasProperties(state, perm, attr)) {
            // If we already have the right properties, just advance.
            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
                remaining_pages = 0;
                cur_address += remaining_size;
            } else {
                remaining_pages =
                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
                cur_address = cur_info.GetEndAddress();
            }
        } else {
            // If we need to, create a new block before and insert it.
            if (cur_info.GetAddress() != cur_address) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address);
                it = m_memory_block_tree.insert(*new_block);
                it++;

                cur_info = it->GetMemoryInfo();
                cur_address = cur_info.GetAddress();
            }

            // If we need to, create a new block after and insert it.
            if (cur_info.GetSize() > remaining_size) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address + remaining_size);
                it = m_memory_block_tree.insert(*new_block);

                cur_info = it->GetMemoryInfo();
            }

            // Update block state.
            it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
                       static_cast<u8>(clear_disable_attr));
            cur_address += cur_info.GetSize();
            remaining_pages -= cur_info.GetNumPages();
        }
        it++;
    }

    this->CoalesceForUpdate(allocator, address, num_pages);
}

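CoalesceForUpdate() walks the affected range once, folding each node into its predecessor whenever CanMergeWith() allows it. A compact sketch of the same pass, with std::list standing in for the intrusive red-black tree:

// Sketch of the coalescing pass; std::list stands in for the intrusive tree.
#include <cassert>
#include <list>

struct Node {
    int start;
    int length;
    int props;

    bool CanMergeWith(const Node& rhs) const {
        return props == rhs.props && start + length == rhs.start;
    }
};

void Coalesce(std::list<Node>& nodes) {
    if (nodes.empty()) {
        return;
    }
    auto it = nodes.begin();
    auto prev = it++;
    while (it != nodes.end()) {
        if (prev->CanMergeWith(*it)) {
            prev->length += it->length;  // absorb the right neighbor
            it = nodes.erase(it);        // and drop its node
        } else {
            prev = it++;
        }
    }
}

int main() {
    std::list<Node> nodes{{0, 4, 1}, {4, 4, 1}, {8, 4, 2}};
    Coalesce(nodes);
    assert(nodes.size() == 2 && nodes.front().length == 8);
}
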
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
                                        VAddr address, size_t num_pages, KMemoryState test_state,
                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
                                        KMemoryState state, KMemoryPermission perm,
                                        KMemoryAttribute attr) {
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
           KMemoryAttribute::None);

    VAddr cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();
        if (it->HasProperties(test_state, test_perm, test_attr) &&
            !it->HasProperties(state, perm, attr)) {
            // If we need to, create a new block before and insert it.
            if (cur_info.GetAddress() != cur_address) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address);
                it = m_memory_block_tree.insert(*new_block);
                it++;

                cur_info = it->GetMemoryInfo();
                cur_address = cur_info.GetAddress();
            }

            // If we need to, create a new block after and insert it.
            if (cur_info.GetSize() > remaining_size) {
                KMemoryBlock* new_block = allocator->Allocate();

                it->Split(new_block, cur_address + remaining_size);
                it = m_memory_block_tree.insert(*new_block);

                cur_info = it->GetMemoryInfo();
            }

            // Update block state.
            it->Update(state, perm, attr, false, 0, 0);
            cur_address += cur_info.GetSize();
            remaining_pages -= cur_info.GetNumPages();
        } else {
            // If we already have the right properties, just advance.
            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
                remaining_pages = 0;
                cur_address += remaining_size;
            } else {
                remaining_pages =
                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
                cur_address = cur_info.GetEndAddress();
            }
        }
        it++;
    }

    this->CoalesceForUpdate(allocator, address, num_pages);
}

void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                                     size_t num_pages, MemoryBlockLockFunction lock_func,
                                     KMemoryPermission perm) {
    const VAddr update_end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};
    // Ensure for auditing that we never end up with an invalid tree.
    KScopedMemoryBlockManagerAuditor auditor(this);
    ASSERT(Common::IsAligned(address, PageSize));

    while (node != memory_block_tree.end()) {
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
    VAddr cur_address = address;
    size_t remaining_pages = num_pages;
    iterator it = this->FindIterator(address);

        if (addr < cur_end_addr && cur_addr < update_end_addr) {
            iterator new_node{node};
    const VAddr end_address = address + (num_pages * PageSize);

            if (addr > cur_addr) {
                memory_block_tree.insert(node, block->Split(addr));
            }
    while (remaining_pages > 0) {
        const size_t remaining_size = remaining_pages * PageSize;
        KMemoryInfo cur_info = it->GetMemoryInfo();

            if (update_end_addr < cur_end_addr) {
                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
            }
        // If we need to, create a new block before and insert it.
        if (cur_info.m_address != cur_address) {
            KMemoryBlock* new_block = allocator->Allocate();

            lock_func(new_node, perm);
            it->Split(new_block, cur_address);
            it = m_memory_block_tree.insert(*new_block);
            it++;

            MergeAdjacent(new_node, next_node);
            cur_info = it->GetMemoryInfo();
            cur_address = cur_info.GetAddress();
        }

        if (cur_end_addr - 1 >= update_end_addr - 1) {
            break;
        if (cur_info.GetSize() > remaining_size) {
            // If we need to, create a new block after and insert it.
            KMemoryBlock* new_block = allocator->Allocate();

            it->Split(new_block, cur_address + remaining_size);
            it = m_memory_block_tree.insert(*new_block);

            cur_info = it->GetMemoryInfo();
        }

        node = next_node;
        // Call the locked update function.
        (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
                                          cur_info.GetEndAddress() == end_address);
        cur_address += cur_info.GetSize();
        remaining_pages -= cur_info.GetNumPages();
        it++;
    }

    this->CoalesceForUpdate(allocator, address, num_pages);
}

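The new UpdateLock() replaces the old std::function-based LockFunc with a pointer to a KMemoryBlock member function, invoked through the (object->*member)(...) syntax. A minimal sketch of that dispatch shape, with a simplified stand-in type:

// Sketch of pointer-to-member dispatch as used by MemoryBlockLockFunction.
#include <cassert>

struct Block {
    int lock_count = 0;

    void Lock(bool left, bool right) { ++lock_count; (void)left; (void)right; }
    void Unlock(bool left, bool right) { --lock_count; (void)left; (void)right; }
};

using LockFunction = void (Block::*)(bool left, bool right);

void Apply(Block& block, LockFunction func, bool left, bool right) {
    // Same call shape as (std::addressof(*it)->*lock_func)(...).
    (block.*func)(left, right);
}

int main() {
    Block b;
    Apply(b, &Block::Lock, true, false);
    assert(b.lock_count == 1);
    Apply(b, &Block::Unlock, true, false);
    assert(b.lock_count == 0);
}
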
void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
    const_iterator it{FindIterator(start)};
    KMemoryInfo info{};
    do {
        info = it->GetMemoryInfo();
        func(info);
        it = std::next(it);
    } while (info.addr + info.size - 1 < end - 1 && it != cend());
}
// Debug.
bool KMemoryBlockManager::CheckState() const {
    // Loop over every block, ensuring that we are sorted and coalesced.
    auto it = m_memory_block_tree.cbegin();
    auto prev = it++;
    while (it != m_memory_block_tree.cend()) {
        const KMemoryInfo prev_info = prev->GetMemoryInfo();
        const KMemoryInfo cur_info = it->GetMemoryInfo();

void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    KMemoryBlock* block{&(*it)};

    auto EraseIt = [&](const iterator it_to_erase) {
        if (next_it == it_to_erase) {
            next_it = std::next(next_it);
        // Sequential blocks which can be merged should be merged.
        if (prev->CanMergeWith(*it)) {
            return false;
        }
        memory_block_tree.erase(it_to_erase);
    };

    if (it != memory_block_tree.begin()) {
        KMemoryBlock* prev{&(*std::prev(it))};
        // Sequential blocks should be sequential.
        if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
            return false;
        }

        if (block->HasSameProperties(*prev)) {
            const iterator prev_it{std::prev(it)};
        // If the block is ipc locked, it must have a count.
        if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
            cur_info.m_ipc_lock_count == 0) {
            return false;
        }

            prev->Add(block->GetNumPages());
            EraseIt(it);
        // If the block is device shared, it must have a count.
        if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
            cur_info.m_device_use_count == 0) {
            return false;
        }

            it = prev_it;
            block = prev;
        // Advance the iterator.
        prev = it++;
    }

    // Our loop will miss checking the last block, potentially, so check it.
    if (prev != m_memory_block_tree.cend()) {
        const KMemoryInfo prev_info = prev->GetMemoryInfo();
        // If the block is ipc locked, it must have a count.
        if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
            prev_info.m_ipc_lock_count == 0) {
            return false;
        }

        // If the block is device shared, it must have a count.
        if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
            prev_info.m_device_use_count == 0) {
            return false;
        }
    }

    if (it != cend()) {
        const KMemoryBlock* const next{&(*std::next(it))};

        if (block->HasSameProperties(*next)) {
            block->Add(next->GetNumPages());
            EraseIt(std::next(it));
        }
    }
    return true;
}

} // namespace Kernel

@@ -4,63 +4,154 @@
#pragma once

#include <functional>
#include <list>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_memory_block.h"

namespace Kernel {

class KMemoryBlockManagerUpdateAllocator {
public:
    static constexpr size_t MaxBlocks = 2;

private:
    KMemoryBlock* m_blocks[MaxBlocks];
    size_t m_index;
    KMemoryBlockSlabManager* m_slab_manager;

private:
    Result Initialize(size_t num_blocks) {
        // Check num blocks.
        ASSERT(num_blocks <= MaxBlocks);

        // Set index.
        m_index = MaxBlocks - num_blocks;

        // Allocate the blocks.
        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
            m_blocks[m_index + i] = m_slab_manager->Allocate();
            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
        }

        R_SUCCEED();
    }

public:
    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
                                       size_t num_blocks = MaxBlocks)
        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
        *out_result = this->Initialize(num_blocks);
    }

    ~KMemoryBlockManagerUpdateAllocator() {
        for (const auto& block : m_blocks) {
            if (block != nullptr) {
                m_slab_manager->Free(block);
            }
        }
    }

    KMemoryBlock* Allocate() {
        ASSERT(m_index < MaxBlocks);
        ASSERT(m_blocks[m_index] != nullptr);
        KMemoryBlock* block = nullptr;
        std::swap(block, m_blocks[m_index++]);
        return block;
    }

    void Free(KMemoryBlock* block) {
        ASSERT(m_index <= MaxBlocks);
        ASSERT(block != nullptr);
        if (m_index == 0) {
            m_slab_manager->Free(block);
        } else {
            m_blocks[--m_index] = block;
        }
    }
};

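The point of this allocator is that every update pre-reserves its worst case (two blocks, one per possible split) before touching the tree, so the update itself can never fail mid-way. A toy sketch of that pre-allocation idea, with a stand-in slab manager:

// Toy sketch of the pre-allocation idea; Slab is a stand-in slab manager.
#include <array>
#include <cassert>
#include <cstddef>

struct Block {};

struct Slab {
    Block* Allocate() { return new Block{}; }
    void Free(Block* b) { delete b; }
};

class UpdateAllocator {
public:
    static constexpr std::size_t MaxBlocks = 2;

    explicit UpdateAllocator(Slab* slab) : m_slab(slab) {
        // Acquire everything before any tree mutation happens.
        for (auto& slot : m_blocks) {
            slot = m_slab->Allocate();
        }
    }
    ~UpdateAllocator() {
        // Return whatever the update did not consume.
        for (auto* slot : m_blocks) {
            if (slot != nullptr) {
                m_slab->Free(slot);
            }
        }
    }

    Block* Take() {
        assert(m_index < MaxBlocks);
        Block* b = m_blocks[m_index];
        m_blocks[m_index++] = nullptr;
        return b;
    }

private:
    std::array<Block*, MaxBlocks> m_blocks{};
    std::size_t m_index = 0;
    Slab* m_slab;
};
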
class KMemoryBlockManager final {
public:
    using MemoryBlockTree = std::list<KMemoryBlock>;
    using MemoryBlockTree =
        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
                                                           bool right);
    using iterator = MemoryBlockTree::iterator;
    using const_iterator = MemoryBlockTree::const_iterator;

public:
    KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);
    KMemoryBlockManager();

    using HostUnmapCallback = std::function<void(VAddr, u64)>;

    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);

    iterator end() {
        return memory_block_tree.end();
        return m_memory_block_tree.end();
    }
    const_iterator end() const {
        return memory_block_tree.end();
        return m_memory_block_tree.end();
    }
    const_iterator cend() const {
        return memory_block_tree.cend();
        return m_memory_block_tree.cend();
    }

    iterator FindIterator(VAddr addr);
    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
                       size_t alignment, size_t offset, size_t guard_pages) const;

    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                       std::size_t align, std::size_t offset, std::size_t guard_pages);
    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
                KMemoryBlockDisableMergeAttribute set_disable_attr,
                KMemoryBlockDisableMergeAttribute clear_disable_attr);
    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);

    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
                KMemoryPermission perm, KMemoryAttribute attribute);
    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
                       KMemoryAttribute attr);

    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
                KMemoryPermission perm = KMemoryPermission::None,
                KMemoryAttribute attribute = KMemoryAttribute::None);
    iterator FindIterator(VAddr address) const {
        return m_memory_block_tree.find(KMemoryBlock(
            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
    }

    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
                    KMemoryPermission perm);
    const KMemoryBlock* FindBlock(VAddr address) const {
        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
            return std::addressof(*it);
        }

    using IterateFunc = std::function<void(const KMemoryInfo&)>;
    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
        return nullptr;
    }

    KMemoryBlock& FindBlock(VAddr addr) {
        return *FindIterator(addr);
    // Debug.
    bool CheckState() const;

private:
    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
                           size_t num_pages);

    MemoryBlockTree m_memory_block_tree;
    VAddr m_start_address{};
    VAddr m_end_address{};
};

class KScopedMemoryBlockManagerAuditor {
public:
    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
        ASSERT(m_manager->CheckState());
    }
    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
    ~KScopedMemoryBlockManagerAuditor() {
        ASSERT(m_manager->CheckState());
    }

private:
    void MergeAdjacent(iterator it, iterator& next_it);

    [[maybe_unused]] const VAddr start_addr;
    [[maybe_unused]] const VAddr end_addr;

    MemoryBlockTree memory_block_tree;
    KMemoryBlockManager* m_manager;
};

} // namespace Kernel

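KScopedMemoryBlockManagerAuditor is a small RAII device: it asserts CheckState() when a mutating scope is entered and again when it is left, so a broken tree is caught at the operation that broke it. A minimal sketch of the same idea with a trivial invariant:

// Minimal sketch of the scoped-auditor pattern; Manager is a stand-in.
#include <cassert>

struct Manager {
    int balance = 0;
    bool CheckState() const { return balance >= 0; }
};

class ScopedAuditor {
public:
    explicit ScopedAuditor(Manager* m) : m_manager(m) {
        assert(m_manager->CheckState());  // invariant holds on entry
    }
    ~ScopedAuditor() {
        assert(m_manager->CheckState());  // and again on exit
    }

private:
    Manager* m_manager;
};

int main() {
    Manager mgr;
    {
        ScopedAuditor auditor(&mgr);
        mgr.balance += 1;  // mutation under audit
    }
    assert(mgr.balance == 1);
}
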
@@ -153,13 +153,9 @@ void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_
    }
}

size_t KMemoryLayout::GetResourceRegionSizeForInit() {
    // Calculate resource region size based on whether we allow extra threads.
    const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
    size_t resource_region_size =
        KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);

    return resource_region_size;
size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) {
    return KernelResourceSize + KSystemControl::SecureAppletMemorySize +
           (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0);
}

} // namespace Kernel

@@ -60,10 +60,12 @@ constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;
constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;

// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000;
constexpr size_t KernelPageBufferHeapSize = 0x3E0000;
constexpr size_t KernelSlabHeapAdditionalSize = 0x148000;
constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;

constexpr std::size_t KernelResourceSize =
    KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize +
                                           KernelSlabHeapSize + KernelPageBufferHeapSize;

constexpr bool IsKernelAddressKey(VAddr key) {
    return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
@@ -168,6 +170,11 @@ public:
                                      KMemoryRegionType_VirtualDramKernelTraceBuffer));
    }

    const KMemoryRegion& GetSecureAppletMemoryRegion() {
        return Dereference(GetVirtualMemoryRegionTree().FindByType(
            KMemoryRegionType_VirtualDramKernelSecureAppletMemory));
    }

    const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
        return Dereference(FindVirtualLinear(address));
    }
@@ -229,7 +236,7 @@ public:

    void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
                                           VAddr linear_virtual_start);
    static size_t GetResourceRegionSizeForInit();
    static size_t GetResourceRegionSizeForInit(bool use_extra_resource);

    auto GetKernelRegionExtents() const {
        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel);
@@ -279,6 +286,10 @@ public:
        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
            KMemoryRegionType_DramKernelSlab);
    }
    auto GetKernelSecureAppletMemoryRegionPhysicalExtents() {
        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
            KMemoryRegionType_DramKernelSecureAppletMemory);
    }
    auto GetKernelPageTableHeapRegionPhysicalExtents() const {
        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
            KMemoryRegionType_DramKernelPtHeap);

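Putting the two hunks above together, the new GetResourceRegionSizeForInit(bool) only charges the additional slab and page-buffer sizes when extra thread resources are enabled. A worked constexpr sketch of that arithmetic; the base resource and secure-applet sizes below are placeholders, not the real values:

// Worked sketch of the resource-region arithmetic; two values are placeholders.
#include <cstddef>

constexpr std::size_t KernelSlabHeapAdditionalSize = 0x148000;
constexpr std::size_t KernelPageBufferAdditionalSize = 0x33C000;
constexpr std::size_t KernelResourceSize = 0x1000000;     // placeholder
constexpr std::size_t SecureAppletMemorySize = 0x400000;  // placeholder

constexpr std::size_t GetResourceRegionSizeForInit(bool use_extra_resource) {
    return KernelResourceSize + SecureAppletMemorySize +
           (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize
                               : 0);
}

// With extra resources enabled, the region grows by exactly
// 0x148000 + 0x33C000 = 0x484000 bytes, regardless of the placeholders.
static_assert(GetResourceRegionSizeForInit(true) - GetResourceRegionSizeForInit(false) ==
              0x484000);
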
@@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
    } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
        return KMemoryManager::Pool::SystemNonSecure;
    } else {
        ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
        return {};
        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
    }
}

} // namespace

KMemoryManager::KMemoryManager(Core::System& system_)
    : system{system_}, pool_locks{
                           KLightLock{system_.Kernel()},
                           KLightLock{system_.Kernel()},
                           KLightLock{system_.Kernel()},
                           KLightLock{system_.Kernel()},
                       } {}
KMemoryManager::KMemoryManager(Core::System& system)
    : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()},
      m_pool_locks{
          KLightLock{system.Kernel()},
          KLightLock{system.Kernel()},
          KLightLock{system.Kernel()},
          KLightLock{system.Kernel()},
      } {}

void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {

    // Clear the management region to zero.
    const VAddr management_region_end = management_region + management_region_size;
    // std::memset(GetVoidPointer(management_region), 0, management_region_size);

    // Reset our manager count.
    num_managers = 0;
    m_num_managers = 0;

    // Traverse the virtual memory layout tree, initializing each manager as appropriate.
    while (num_managers != MaxManagerCount) {
    while (m_num_managers != MaxManagerCount) {
        // Locate the region that should initialize the current manager.
        PAddr region_address = 0;
        size_t region_size = 0;
        Pool region_pool = Pool::Count;
        for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
        for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
            // We only care about regions that we need to create managers for.
            if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
                continue;
            }

            // We want to initialize the managers in order.
            if (it.GetAttributes() != num_managers) {
            if (it.GetAttributes() != m_num_managers) {
                continue;
            }

@@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
        }

        // Initialize a new manager for the region.
        Impl* manager = std::addressof(managers[num_managers++]);
        ASSERT(num_managers <= managers.size());
        Impl* manager = std::addressof(m_managers[m_num_managers++]);
        ASSERT(m_num_managers <= m_managers.size());

        const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
                                                    management_region_end, region_pool);
@@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio

        // Insert the manager into the pool list.
        const auto region_pool_index = static_cast<u32>(region_pool);
        if (pool_managers_tail[region_pool_index] == nullptr) {
            pool_managers_head[region_pool_index] = manager;
        if (m_pool_managers_tail[region_pool_index] == nullptr) {
            m_pool_managers_head[region_pool_index] = manager;
        } else {
            pool_managers_tail[region_pool_index]->SetNext(manager);
            manager->SetPrev(pool_managers_tail[region_pool_index]);
            m_pool_managers_tail[region_pool_index]->SetNext(manager);
            manager->SetPrev(m_pool_managers_tail[region_pool_index]);
        }
        pool_managers_tail[region_pool_index] = manager;
        m_pool_managers_tail[region_pool_index] = manager;
    }

    // Free each region to its corresponding heap.
@@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
    const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
    const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
    const PAddr ini_last = ini_end - 1;
    for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
    for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
        if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
            // Get the manager for the region.
            auto index = it.GetAttributes();
            auto& manager = managers[index];
            auto& manager = m_managers[it.GetAttributes()];

            const PAddr cur_start = it.GetAddress();
            const PAddr cur_last = it.GetLastAddress();
@@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
    }

    // Update the used size for all managers.
    for (size_t i = 0; i < num_managers; ++i) {
        managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
    for (size_t i = 0; i < m_num_managers; ++i) {
        m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
    }
}

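The pool_managers_head/tail code above is a plain intrusive doubly-linked list append. A small sketch of exactly that insertion, including the empty-list case, with a simplified Manager type:

// Sketch of the head/tail append used to chain managers per pool.
#include <cassert>

struct Manager {
    Manager* prev = nullptr;
    Manager* next = nullptr;

    void SetNext(Manager* m) { next = m; }
    void SetPrev(Manager* m) { prev = m; }
};

struct PoolList {
    Manager* head = nullptr;
    Manager* tail = nullptr;

    // Append at the tail, handling the empty list exactly as the code above.
    void Append(Manager* manager) {
        if (tail == nullptr) {
            head = manager;
        } else {
            tail->SetNext(manager);
            manager->SetPrev(tail);
        }
        tail = manager;
    }
};

int main() {
    PoolList list;
    Manager a, b;
    list.Append(&a);
    list.Append(&b);
    assert(list.head == &a && list.tail == &b && a.next == &b && b.prev == &a);
}
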
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
    UNREACHABLE();
}

void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
    UNREACHABLE();
}

PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
    // Early return if we're allocating no pages.
    if (num_pages == 0) {
@@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p

    // Lock the pool that we're allocating from.
    const auto [pool, dir] = DecodeOption(option);
    KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);
    KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]);

    // Choose a heap based on our page size request.
    const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
@@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
    PAddr allocated_block = 0;
    for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
         chosen_manager = this->GetNextManager(chosen_manager, dir)) {
        allocated_block = chosen_manager->AllocateBlock(heap_index, true);
        allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
        if (allocated_block != 0) {
            break;
        }
@@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
        return 0;
    }

    // If we allocated more than we need, free some.
    const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
    if (allocated_pages > num_pages) {
        chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
    // Maintain the optimized memory bitmap, if we should.
    if (m_has_optimized_process[static_cast<size_t>(pool)]) {
        UNIMPLEMENTED();
    }

    // Open the first reference to the pages.
@@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
}

Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
                                             Direction dir, bool random) {
                                             Direction dir, bool unoptimized, bool random) {
    // Choose a heap based on our page size request.
    const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
    R_UNLESS(0 <= heap_index, ResultOutOfMemory);

    // Ensure that we don't leave anything un-freed.
    auto group_guard = SCOPE_GUARD({
    ON_RESULT_FAILURE {
        for (const auto& it : out->Nodes()) {
            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
            const size_t num_pages_to_free =
            auto& manager = this->GetManager(it.GetAddress());
            const size_t node_num_pages =
                std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
            manager.Free(it.GetAddress(), num_pages_to_free);
            manager.Free(it.GetAddress(), node_num_pages);
        }
    });
        out->Finalize();
    };

    // Keep allocating until we've allocated all our pages.
    for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
@@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
            break;
        }

        // Safely add it to our group.
        {
            auto block_guard =
                SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
            R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
            block_guard.Cancel();
        // Ensure we don't leak the block if we fail.
        ON_RESULT_FAILURE_2 {
            cur_manager->Free(allocated_block, pages_per_alloc);
        };

        // Add the block to our group.
        R_TRY(out->AddBlock(allocated_block, pages_per_alloc));

        // Maintain the optimized memory bitmap, if we should.
        if (unoptimized) {
            UNIMPLEMENTED();
        }

        num_pages -= pages_per_alloc;
@@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
    R_UNLESS(num_pages == 0, ResultOutOfMemory);

    // We succeeded!
    group_guard.Cancel();
    return ResultSuccess;
    R_SUCCEED();
}

Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
|
||||
@@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
|
||||
|
||||
// Lock the pool that we're allocating from.
|
||||
const auto [pool, dir] = DecodeOption(option);
|
||||
KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
|
||||
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
|
||||
|
||||
// Allocate the page group.
|
||||
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
|
||||
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir,
|
||||
m_has_optimized_process[static_cast<size_t>(pool)], true));
|
||||
|
||||
// Open the first reference to the pages.
|
||||
for (const auto& block : out->Nodes()) {
|
||||
@@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
|
||||
size_t remaining_pages = block.GetNumPages();
|
||||
while (remaining_pages > 0) {
|
||||
// Get the manager for the current address.
|
||||
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
|
||||
auto& manager = this->GetManager(cur_address);
|
||||
|
||||
// Process part or all of the block.
|
||||
const size_t cur_pages =
|
||||
@@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
|
||||
}
|
||||
}
|
||||
|
||||
return ResultSuccess;
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
|
||||
u64 process_id, u8 fill_pattern) {
|
||||
Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option,
|
||||
u64 process_id, u8 fill_pattern) {
|
||||
ASSERT(out != nullptr);
|
||||
ASSERT(out->GetNumPages() == 0);
|
||||
|
||||
@@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
|
||||
const auto [pool, dir] = DecodeOption(option);
|
||||
|
||||
// Allocate the memory.
|
||||
bool optimized;
|
||||
{
|
||||
// Lock the pool that we're allocating from.
|
||||
KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
|
||||
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
|
||||
|
||||
// Check if we have an optimized process.
|
||||
const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)];
|
||||
const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id;
|
||||
|
||||
// Allocate the page group.
|
||||
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
|
||||
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized,
|
||||
false));
|
||||
|
||||
// Open the first reference to the pages.
|
||||
// Set whether we should optimize.
|
||||
optimized = has_optimized && is_optimized;
|
||||
}
|
||||
|
||||
// Perform optimized memory tracking, if we should.
|
||||
if (optimized) {
|
||||
// Iterate over the allocated blocks.
|
||||
for (const auto& block : out->Nodes()) {
|
||||
PAddr cur_address = block.GetAddress();
|
||||
size_t remaining_pages = block.GetNumPages();
|
||||
while (remaining_pages > 0) {
|
||||
// Get the manager for the current address.
|
||||
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
|
||||
// Get the block extents.
|
||||
const PAddr block_address = block.GetAddress();
|
||||
const size_t block_pages = block.GetNumPages();
|
||||
|
||||
// Process part or all of the block.
|
||||
const size_t cur_pages =
|
||||
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
|
||||
manager.OpenFirst(cur_address, cur_pages);
|
||||
// If it has no pages, we don't need to do anything.
|
||||
if (block_pages == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Advance.
|
||||
cur_address += cur_pages * PageSize;
|
||||
remaining_pages -= cur_pages;
|
||||
// Fill all the pages that we need to fill.
|
||||
bool any_new = false;
|
||||
{
|
||||
PAddr cur_address = block_address;
|
||||
size_t remaining_pages = block_pages;
|
||||
while (remaining_pages > 0) {
|
||||
// Get the manager for the current address.
|
||||
auto& manager = this->GetManager(cur_address);
|
||||
|
||||
// Process part or all of the block.
|
||||
const size_t cur_pages =
|
||||
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
|
||||
any_new =
|
||||
manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
|
||||
|
||||
// Advance.
|
||||
cur_address += cur_pages * PageSize;
|
||||
remaining_pages -= cur_pages;
|
||||
}
|
||||
}
|
||||
|
||||
// If there are new pages, update tracking for the allocation.
|
||||
if (any_new) {
|
||||
// Update tracking for the allocation.
|
||||
PAddr cur_address = block_address;
|
||||
size_t remaining_pages = block_pages;
|
||||
while (remaining_pages > 0) {
|
||||
// Get the manager for the current address.
|
||||
auto& manager = this->GetManager(cur_address);
|
||||
|
||||
// Lock the pool for the manager.
|
||||
KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
|
||||
|
||||
// Track some or all of the current pages.
|
||||
const size_t cur_pages =
|
||||
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
|
||||
manager.TrackOptimizedAllocation(cur_address, cur_pages);
|
||||
|
||||
// Advance.
|
||||
cur_address += cur_pages * PageSize;
|
||||
remaining_pages -= cur_pages;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set all the allocated memory.
|
||||
for (const auto& block : out->Nodes()) {
|
||||
std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
|
||||
block.GetSize());
|
||||
}
|
||||
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
void KMemoryManager::Open(PAddr address, size_t num_pages) {
|
||||
// Repeatedly open references until we've done so for all pages.
|
||||
while (num_pages) {
|
||||
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
|
||||
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
|
||||
|
||||
{
|
||||
KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
|
||||
manager.Open(address, cur_pages);
|
||||
} else {
|
||||
// Set all the allocated memory.
|
||||
for (const auto& block : out->Nodes()) {
|
||||
std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
|
||||
block.GetSize());
|
||||
}
|
||||
|
||||
num_pages -= cur_pages;
|
||||
address += cur_pages * PageSize;
|
||||
}
|
||||
}
|
||||
|
||||
void KMemoryManager::Close(PAddr address, size_t num_pages) {
|
||||
// Repeatedly close references until we've done so for all pages.
|
||||
while (num_pages) {
|
||||
auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
|
||||
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
|
||||
|
||||
{
|
||||
KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
|
||||
manager.Close(address, cur_pages);
|
||||
}
|
||||
|
||||
num_pages -= cur_pages;
|
||||
address += cur_pages * PageSize;
|
||||
}
|
||||
}
|
||||
|
||||
void KMemoryManager::Close(const KPageGroup& pg) {
|
||||
for (const auto& node : pg.Nodes()) {
|
||||
Close(node.GetAddress(), node.GetNumPages());
|
||||
}
|
||||
}
|
||||
void KMemoryManager::Open(const KPageGroup& pg) {
|
||||
for (const auto& node : pg.Nodes()) {
|
||||
Open(node.GetAddress(), node.GetNumPages());
|
||||
}
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
|
||||
@@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
|
||||
ASSERT(Common::IsAligned(total_management_size, PageSize));
|
||||
|
||||
// Setup region.
|
||||
pool = p;
|
||||
management_region = management;
|
||||
page_reference_counts.resize(
|
||||
m_pool = p;
|
||||
m_management_region = management;
|
||||
m_page_reference_counts.resize(
|
||||
Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
|
||||
ASSERT(Common::IsAligned(management_region, PageSize));
|
||||
ASSERT(Common::IsAligned(m_management_region, PageSize));
|
||||
|
||||
// Initialize the manager's KPageHeap.
|
||||
heap.Initialize(address, size, management + manager_size, page_heap_size);
|
||||
m_heap.Initialize(address, size, management + manager_size, page_heap_size);
|
||||
|
||||
return total_management_size;
|
||||
}
|
||||
|
||||
void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
|
||||
u8 fill_pattern) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
|
||||
const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
|
||||
const size_t optimize_map_size =
|
||||
|
||||
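
The .cpp hunks above replace yuzu's cancellable SCOPE_GUARD with Atmosphère-style ON_RESULT_FAILURE blocks. As a rough mental model only (the real macros live in the project's result headers and hook into R_TRY/R_SUCCEED; this simplified stand-in is hypothetical), the old pattern can be pictured as a destructor-armed guard that must be disarmed by hand on success, which is exactly the Cancel() call that disappears in the new code:

#include <utility>

// Hedged sketch of the old SCOPE_GUARD idiom; names are illustrative, not yuzu's.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F&& f) : m_f(std::forward<F>(f)) {}
    ~ScopeGuard() {
        // Run the cleanup body unless the caller reached the success path and cancelled.
        if (m_armed) {
            m_f();
        }
    }
    void Cancel() {
        m_armed = false;
    }

private:
    F m_f;
    bool m_armed = true;
};

ON_RESULT_FAILURE ties the disarming to the result machinery instead: the cleanup body runs only when an R_TRY in the same scope propagates a failing Result, so the success path no longer needs an explicit Cancel().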
@@ -21,11 +21,8 @@ namespace Kernel {

 class KPageGroup;

-class KMemoryManager final {
+class KMemoryManager {
 public:
     YUZU_NON_COPYABLE(KMemoryManager);
     YUZU_NON_MOVEABLE(KMemoryManager);

     enum class Pool : u32 {
         Application = 0,
         Applet = 1,
@@ -45,16 +42,85 @@ public:
     enum class Direction : u32 {
         FromFront = 0,
         FromBack = 1,

         Shift = 0,
         Mask = (0xF << Shift),
     };

-    explicit KMemoryManager(Core::System& system_);
+    static constexpr size_t MaxManagerCount = 10;
+
+    explicit KMemoryManager(Core::System& system);

     void Initialize(VAddr management_region, size_t management_region_size);

-    constexpr size_t GetSize(Pool pool) const {
+    Result InitializeOptimizedMemory(u64 process_id, Pool pool);
+    void FinalizeOptimizedMemory(u64 process_id, Pool pool);
+
+    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
+    Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
+                              u8 fill_pattern);
+
+    Pool GetPool(PAddr address) const {
+        return this->GetManager(address).GetPool();
+    }
+
+    void Open(PAddr address, size_t num_pages) {
+        // Repeatedly open references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.Open(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    void OpenFirst(PAddr address, size_t num_pages) {
+        // Repeatedly open references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.OpenFirst(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    void Close(PAddr address, size_t num_pages) {
+        // Repeatedly close references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.Close(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    size_t GetSize() {
+        size_t total = 0;
+        for (size_t i = 0; i < m_num_managers; i++) {
+            total += m_managers[i].GetSize();
+        }
+        return total;
+    }
+
+    size_t GetSize(Pool pool) {
         constexpr Direction GetSizeDirection = Direction::FromFront;
         size_t total = 0;
         for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
@@ -64,18 +130,36 @@ public:
         return total;
     }

-    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
-    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
-    Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
-                                     u8 fill_pattern);
+    size_t GetFreeSize() {
+        size_t total = 0;
+        for (size_t i = 0; i < m_num_managers; i++) {
+            KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]);
+            total += m_managers[i].GetFreeSize();
+        }
+        return total;
+    }

-    static constexpr size_t MaxManagerCount = 10;
+    size_t GetFreeSize(Pool pool) {
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);

-    void Close(PAddr address, size_t num_pages);
-    void Close(const KPageGroup& pg);
+        constexpr Direction GetSizeDirection = Direction::FromFront;
+        size_t total = 0;
+        for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
+             manager = this->GetNextManager(manager, GetSizeDirection)) {
+            total += manager->GetFreeSize();
+        }
+        return total;
+    }

-    void Open(PAddr address, size_t num_pages);
-    void Open(const KPageGroup& pg);
+    void DumpFreeList(Pool pool) {
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+        constexpr Direction DumpDirection = Direction::FromFront;
+        for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr;
+             manager = this->GetNextManager(manager, DumpDirection)) {
+            manager->DumpFreeList();
+        }
+    }

+public:
     static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -88,14 +172,13 @@ public:
     }

     static constexpr Pool GetPool(u32 option) {
-        return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
+        return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >>
                                  static_cast<u32>(Pool::Shift));
     }

     static constexpr Direction GetDirection(u32 option) {
-        return static_cast<Direction>(
-            (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
-            static_cast<u32>(Direction::Shift));
+        return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >>
+                                      static_cast<u32>(Direction::Shift));
     }

     static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {
@@ -103,74 +186,88 @@ public:
     }

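Given the Shift/Mask constants above, an allocation option word packs a Pool and a Direction into disjoint nibbles, and GetPool/GetDirection mask them back out. A hedged sketch of the round trip (EncodeOption is hypothetical and does not appear in the header; Pool::Shift and Pool::Mask exist, since GetPool uses them, but their exact values are elided by the hunk above):

using Pool = KMemoryManager::Pool;
using Direction = KMemoryManager::Direction;

// Illustrative inverse of DecodeOption: place each field at its own shift.
constexpr u32 EncodeOption(Pool pool, Direction dir) {
    return (static_cast<u32>(pool) << static_cast<u32>(Pool::Shift)) |
           (static_cast<u32>(dir) << static_cast<u32>(Direction::Shift));
}

// The masks make the two fields recoverable independently.
static_assert(KMemoryManager::GetPool(EncodeOption(Pool::Applet, Direction::FromBack)) ==
              Pool::Applet);
static_assert(KMemoryManager::GetDirection(EncodeOption(Pool::Applet, Direction::FromBack)) ==
              Direction::FromBack);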
private:
-    class Impl final {
+    class Impl {
     public:
-        YUZU_NON_COPYABLE(Impl);
-        YUZU_NON_MOVEABLE(Impl);
+        static size_t CalculateManagementOverheadSize(size_t region_size);
+
+        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
+            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+                    Common::BitSize<u64>()) *
+                   sizeof(u64);
+        }

+    public:
         Impl() = default;
         ~Impl() = default;

         size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
                           Pool p);

-        VAddr AllocateBlock(s32 index, bool random) {
-            return heap.AllocateBlock(index, random);
+        PAddr AllocateBlock(s32 index, bool random) {
+            return m_heap.AllocateBlock(index, random);
         }

-        void Free(VAddr addr, size_t num_pages) {
-            heap.Free(addr, num_pages);
+        PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+            return m_heap.AllocateAligned(index, num_pages, align_pages);
         }
+        void Free(PAddr addr, size_t num_pages) {
+            m_heap.Free(addr, num_pages);
+        }

         void SetInitialUsedHeapSize(size_t reserved_size) {
-            heap.SetInitialUsedSize(reserved_size);
+            m_heap.SetInitialUsedSize(reserved_size);
         }

+        void InitializeOptimizedMemory() {
+            UNIMPLEMENTED();
+        }
+
+        void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
+        void TrackOptimizedAllocation(PAddr block, size_t num_pages);
+
+        bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
+
         constexpr Pool GetPool() const {
-            return pool;
+            return m_pool;
         }

         constexpr size_t GetSize() const {
-            return heap.GetSize();
+            return m_heap.GetSize();
         }
+        constexpr PAddr GetEndAddress() const {
+            return m_heap.GetEndAddress();
+        }

-        constexpr VAddr GetAddress() const {
-            return heap.GetAddress();
+        size_t GetFreeSize() const {
+            return m_heap.GetFreeSize();
         }

-        constexpr VAddr GetEndAddress() const {
-            return heap.GetEndAddress();
+        void DumpFreeList() const {
+            UNIMPLEMENTED();
         }

         constexpr size_t GetPageOffset(PAddr address) const {
-            return heap.GetPageOffset(address);
+            return m_heap.GetPageOffset(address);
         }

         constexpr size_t GetPageOffsetToEnd(PAddr address) const {
-            return heap.GetPageOffsetToEnd(address);
+            return m_heap.GetPageOffsetToEnd(address);
         }

         constexpr void SetNext(Impl* n) {
-            next = n;
+            m_next = n;
         }

         constexpr void SetPrev(Impl* n) {
-            prev = n;
+            m_prev = n;
         }

         constexpr Impl* GetNext() const {
-            return next;
+            return m_next;
         }

         constexpr Impl* GetPrev() const {
-            return prev;
+            return m_prev;
        }

         void OpenFirst(PAddr address, size_t num_pages) {
             size_t index = this->GetPageOffset(address);
             const size_t end = index + num_pages;
             while (index < end) {
-                const RefCount ref_count = (++page_reference_counts[index]);
+                const RefCount ref_count = (++m_page_reference_counts[index]);
                 ASSERT(ref_count == 1);

                 index++;
@@ -181,7 +278,7 @@ private:
             size_t index = this->GetPageOffset(address);
             const size_t end = index + num_pages;
             while (index < end) {
-                const RefCount ref_count = (++page_reference_counts[index]);
+                const RefCount ref_count = (++m_page_reference_counts[index]);
                 ASSERT(ref_count > 1);

                 index++;
@@ -195,8 +292,8 @@ private:
             size_t free_start = 0;
             size_t free_count = 0;
             while (index < end) {
-                ASSERT(page_reference_counts[index] > 0);
-                const RefCount ref_count = (--page_reference_counts[index]);
+                ASSERT(m_page_reference_counts[index] > 0);
+                const RefCount ref_count = (--m_page_reference_counts[index]);

                 // Keep track of how many zero refcounts we see in a row, to minimize calls to free.
                 if (ref_count == 0) {
@@ -208,7 +305,7 @@ private:
                     }
                 } else {
                     if (free_count > 0) {
-                        this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+                        this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
                         free_count = 0;
                     }
                 }
@@ -217,44 +314,36 @@ private:
             }

             if (free_count > 0) {
-                this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+                this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
             }
         }

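The Close path above batches consecutive zero refcounts into a single Free call rather than freeing page by page. The same run-length idea in isolation, as a hedged, self-contained sketch (CloseRange and FreeRun are hypothetical stand-ins for Impl::Close and the heap free):

#include <cstddef>
#include <vector>

// Decrement refcounts over [index, index + count) and free pages whose count
// reaches zero, coalescing adjacent freeable pages into one FreeRun call each.
void CloseRange(std::vector<unsigned short>& ref_counts, std::size_t index, std::size_t count,
                void (*FreeRun)(std::size_t start, std::size_t len)) {
    std::size_t free_start = 0;
    std::size_t free_count = 0;
    for (const std::size_t end = index + count; index < end; ++index) {
        if (--ref_counts[index] == 0) {
            if (free_count == 0) {
                free_start = index; // Start a new run of freeable pages.
            }
            ++free_count;
        } else if (free_count > 0) {
            FreeRun(free_start, free_count); // Flush the finished run.
            free_count = 0;
        }
    }
    if (free_count > 0) {
        FreeRun(free_start, free_count); // Flush the trailing run.
    }
}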
-        static size_t CalculateManagementOverheadSize(size_t region_size);
-
-        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
-            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
-                    Common::BitSize<u64>()) *
-                   sizeof(u64);
-        }
-
     private:
         using RefCount = u16;

-        KPageHeap heap;
-        std::vector<RefCount> page_reference_counts;
-        VAddr management_region{};
-        Pool pool{};
-        Impl* next{};
-        Impl* prev{};
+        KPageHeap m_heap;
+        std::vector<RefCount> m_page_reference_counts;
+        VAddr m_management_region{};
+        Pool m_pool{};
+        Impl* m_next{};
+        Impl* m_prev{};
     };

 private:
-    Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
-        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+    Impl& GetManager(PAddr address) {
+        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
     }

-    const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
-        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+    const Impl& GetManager(PAddr address) const {
+        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
     }

-    constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
-        return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
-                                          : pool_managers_head[static_cast<size_t>(pool)];
+    constexpr Impl* GetFirstManager(Pool pool, Direction dir) {
+        return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)]
+                                          : m_pool_managers_head[static_cast<size_t>(pool)];
     }

-    constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
+    constexpr Impl* GetNextManager(Impl* cur, Direction dir) {
         if (dir == Direction::FromBack) {
             return cur->GetPrev();
         } else {
@@ -263,15 +352,21 @@ private:
     }

     Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
-                                 bool random);
+                                 bool unoptimized, bool random);

 private:
-    Core::System& system;
-    std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
-    std::array<Impl*, MaxManagerCount> pool_managers_head{};
-    std::array<Impl*, MaxManagerCount> pool_managers_tail{};
-    std::array<Impl, MaxManagerCount> managers;
-    size_t num_managers{};
+    template <typename T>
+    using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>;
+
+    Core::System& m_system;
+    const KMemoryLayout& m_memory_layout;
+    PoolArray<KLightLock> m_pool_locks;
+    std::array<Impl*, MaxManagerCount> m_pool_managers_head{};
+    std::array<Impl*, MaxManagerCount> m_pool_managers_tail{};
+    std::array<Impl, MaxManagerCount> m_managers;
+    size_t m_num_managers{};
+    PoolArray<u64> m_optimized_process_ids{};
+    PoolArray<bool> m_has_optimized_process{};
 };

 } // namespace Kernel

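To make CalculateOptimizedProcessOverheadSize in the header above concrete: the optimized-process map stores one bit per page, packed into u64 words. For an assumed 4 GiB region with an assumed 4 KiB PageSize, that is 1,048,576 pages, rounded up to 16,384 u64 words, or 128 KiB of overhead. A hedged, standalone restatement of the same arithmetic:

#include <cstdint>

// Standalone sketch of the header's formula; PageSize is assumed to be 4 KiB,
// and AlignUp(n, 64) / 64 is expressed as ceiling division.
constexpr std::uint64_t OptimizedProcessOverhead(std::uint64_t region_size) {
    constexpr std::uint64_t kPageSize = 4096;
    constexpr std::uint64_t kBitsPerWord = 64;
    const std::uint64_t num_pages = region_size / kPageSize;
    // One bit per page, rounded up to whole u64 words.
    const std::uint64_t num_words = (num_pages + kBitsPerWord - 1) / kBitsPerWord;
    return num_words * sizeof(std::uint64_t);
}

static_assert(OptimizedProcessOverhead(std::uint64_t{4} << 30) == 128 * 1024);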
@@ -142,32 +142,38 @@ private:

 } // namespace impl

-constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
-constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
-constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
+constexpr inline auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
+
+constexpr inline auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
+constexpr inline auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
 static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1);
 static_assert(KMemoryRegionType_Dram.GetValue() == 0x2);

-constexpr auto KMemoryRegionType_DramKernelBase =
+// constexpr inline auto KMemoryRegionType_CoreLocalRegion =
+//     KMemoryRegionType_None.DeriveInitial(2).Finalize();
+// static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4);
+
+constexpr inline auto KMemoryRegionType_DramKernelBase =
     KMemoryRegionType_Dram.DeriveSparse(0, 3, 0)
         .SetAttribute(KMemoryRegionAttr_NoUserMap)
         .SetAttribute(KMemoryRegionAttr_CarveoutProtected);
-constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
-constexpr auto KMemoryRegionType_DramHeapBase =
+constexpr inline auto KMemoryRegionType_DramReservedBase =
+    KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
+constexpr inline auto KMemoryRegionType_DramHeapBase =
     KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped);
 static_assert(KMemoryRegionType_DramKernelBase.GetValue() ==
               (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16));
 static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped));

-constexpr auto KMemoryRegionType_DramKernelCode =
+constexpr inline auto KMemoryRegionType_DramKernelCode =
     KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0);
-constexpr auto KMemoryRegionType_DramKernelSlab =
+constexpr inline auto KMemoryRegionType_DramKernelSlab =
     KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1);
-constexpr auto KMemoryRegionType_DramKernelPtHeap =
+constexpr inline auto KMemoryRegionType_DramKernelPtHeap =
     KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute(
         KMemoryRegionAttr_LinearMapped);
-constexpr auto KMemoryRegionType_DramKernelInitPt =
+constexpr inline auto KMemoryRegionType_DramKernelInitPt =
     KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute(
         KMemoryRegionAttr_LinearMapped);
 static_assert(KMemoryRegionType_DramKernelCode.GetValue() ==
@@ -181,32 +187,40 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
               (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_LinearMapped));

-constexpr auto KMemoryRegionType_DramReservedEarly =
+constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
+    KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
+        KMemoryRegionAttr_LinearMapped);
+static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
+              (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
+               KMemoryRegionAttr_LinearMapped));
+
+constexpr inline auto KMemoryRegionType_DramReservedEarly =
     KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
 static_assert(KMemoryRegionType_DramReservedEarly.GetValue() ==
               (0x16 | KMemoryRegionAttr_NoUserMap));

-constexpr auto KMemoryRegionType_KernelTraceBuffer =
+constexpr inline auto KMemoryRegionType_KernelTraceBuffer =
     KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0)
         .SetAttribute(KMemoryRegionAttr_LinearMapped)
         .SetAttribute(KMemoryRegionAttr_UserReadOnly);
-constexpr auto KMemoryRegionType_OnMemoryBootImage =
+constexpr inline auto KMemoryRegionType_OnMemoryBootImage =
     KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1);
-constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
+constexpr inline auto KMemoryRegionType_DTB =
+    KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
 static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() ==
               (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly));
 static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156);
 static_assert(KMemoryRegionType_DTB.GetValue() == 0x256);

-constexpr auto KMemoryRegionType_DramPoolPartition =
+constexpr inline auto KMemoryRegionType_DramPoolPartition =
     KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
 static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
               (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));

-constexpr auto KMemoryRegionType_DramPoolManagement =
+constexpr inline auto KMemoryRegionType_DramPoolManagement =
     KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
         KMemoryRegionAttr_CarveoutProtected);
-constexpr auto KMemoryRegionType_DramUserPool =
+constexpr inline auto KMemoryRegionType_DramUserPool =
     KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
 static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
               (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
@@ -214,11 +228,13 @@ static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
 static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
               (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));

-constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0);
-constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1);
-constexpr auto KMemoryRegionType_DramSystemNonSecurePool =
+constexpr inline auto KMemoryRegionType_DramApplicationPool =
+    KMemoryRegionType_DramUserPool.Derive(4, 0);
+constexpr inline auto KMemoryRegionType_DramAppletPool =
+    KMemoryRegionType_DramUserPool.Derive(4, 1);
+constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
     KMemoryRegionType_DramUserPool.Derive(4, 2);
-constexpr auto KMemoryRegionType_DramSystemPool =
+constexpr inline auto KMemoryRegionType_DramSystemPool =
     KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
 static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
               (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
@@ -230,50 +246,55 @@ static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
               (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_CarveoutProtected));

-constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
-constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap =
+constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
+    KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
+constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
     KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
-constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
+constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
     KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
 static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
 static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
 static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);

 // UNUSED: .DeriveSparse(2, 2, 0);
-constexpr auto KMemoryRegionType_VirtualDramUnknownDebug =
+constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
     KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
 static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));

-constexpr auto KMemoryRegionType_VirtualDramKernelInitPt =
+constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
+    KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
+static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
+
+constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
     KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
-constexpr auto KMemoryRegionType_VirtualDramPoolManagement =
+constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
     KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
-constexpr auto KMemoryRegionType_VirtualDramUserPool =
+constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
     KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
 static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
 static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
 static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);

-// NOTE: For unknown reason, the pools are derived out-of-order here. It's worth eventually trying
-// to understand why Nintendo made this choice.
+// NOTE: For unknown reason, the pools are derived out-of-order here.
+// It's worth eventually trying to understand why Nintendo made this choice.
 // UNUSED: .Derive(6, 0);
 // UNUSED: .Derive(6, 1);
-constexpr auto KMemoryRegionType_VirtualDramAppletPool =
+constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
     KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
-constexpr auto KMemoryRegionType_VirtualDramApplicationPool =
+constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
     KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
-constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
+constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
     KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
-constexpr auto KMemoryRegionType_VirtualDramSystemPool =
+constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
     KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
 static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
 static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
 static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
 static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);

-constexpr auto KMemoryRegionType_ArchDeviceBase =
+constexpr inline auto KMemoryRegionType_ArchDeviceBase =
     KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
-constexpr auto KMemoryRegionType_BoardDeviceBase =
+constexpr inline auto KMemoryRegionType_BoardDeviceBase =
     KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();
 static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5);
 static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
@@ -284,7 +305,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
 #error "Unimplemented"
 #else
 // Default to no architecture devices.
-constexpr auto NumArchitectureDeviceRegions = 0;
+constexpr inline auto NumArchitectureDeviceRegions = 0;
 #endif
 static_assert(NumArchitectureDeviceRegions >= 0);

@@ -292,34 +313,35 @@ static_assert(NumArchitectureDeviceRegions >= 0);
 #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc"
 #else
 // Default to no board devices.
-constexpr auto NumBoardDeviceRegions = 0;
+constexpr inline auto NumBoardDeviceRegions = 0;
 #endif
 static_assert(NumBoardDeviceRegions >= 0);

-constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
-constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
-constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
-constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
+constexpr inline auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
+constexpr inline auto KMemoryRegionType_KernelStack =
+    KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
+constexpr inline auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
+constexpr inline auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
 static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19);
 static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29);
 static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49);
 static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89);

-constexpr auto KMemoryRegionType_KernelMiscDerivedBase =
+constexpr inline auto KMemoryRegionType_KernelMiscDerivedBase =
     KMemoryRegionType_KernelMisc.DeriveTransition();
 static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149);

 // UNUSED: .Derive(7, 0);
-constexpr auto KMemoryRegionType_KernelMiscMainStack =
+constexpr inline auto KMemoryRegionType_KernelMiscMainStack =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1);
-constexpr auto KMemoryRegionType_KernelMiscMappedDevice =
+constexpr inline auto KMemoryRegionType_KernelMiscMappedDevice =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2);
-constexpr auto KMemoryRegionType_KernelMiscExceptionStack =
+constexpr inline auto KMemoryRegionType_KernelMiscExceptionStack =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3);
-constexpr auto KMemoryRegionType_KernelMiscUnknownDebug =
+constexpr inline auto KMemoryRegionType_KernelMiscUnknownDebug =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4);
 // UNUSED: .Derive(7, 5);
-constexpr auto KMemoryRegionType_KernelMiscIdleStack =
+constexpr inline auto KMemoryRegionType_KernelMiscIdleStack =
     KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);
 static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49);
 static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49);
@@ -327,7 +349,8 @@ static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349);
 static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549);
 static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349);

-constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
+constexpr inline auto KMemoryRegionType_KernelTemp =
+    KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
 static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);

 constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
@@ -335,6 +358,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
         return KMemoryRegionType_VirtualDramKernelTraceBuffer;
     } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
         return KMemoryRegionType_VirtualDramKernelPtHeap;
+    } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
     } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
         return KMemoryRegionType_VirtualDramUnknownDebug;
     } else {

@@ -16,107 +16,126 @@
 namespace Kernel {

 class KPageBitmap {
-private:
+public:
     class RandomBitGenerator {
-    private:
-        Common::TinyMT rng{};
-        u32 entropy{};
-        u32 bits_available{};
-
-    private:
-        void RefreshEntropy() {
-            entropy = rng.GenerateRandomU32();
-            bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>());
-        }
-
-        bool GenerateRandomBit() {
-            if (bits_available == 0) {
-                this->RefreshEntropy();
-            }
-
-            const bool rnd_bit = (entropy & 1) != 0;
-            entropy >>= 1;
-            --bits_available;
-            return rnd_bit;
-        }
-
     public:
         RandomBitGenerator() {
-            rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
+            m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
         }

-        std::size_t SelectRandomBit(u64 bitmap) {
+        u64 SelectRandomBit(u64 bitmap) {
             u64 selected = 0;

-            u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2;
-            u64 cur_mask = (1ULL << cur_num_bits) - 1;
+            for (size_t cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; cur_num_bits != 0;
+                 cur_num_bits /= 2) {
+                const u64 high = (bitmap >> cur_num_bits);
+                const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits)));

-            while (cur_num_bits) {
-                const u64 low = (bitmap >> 0) & cur_mask;
-                const u64 high = (bitmap >> cur_num_bits) & cur_mask;
-
-                bool choose_low;
-                if (high == 0) {
-                    // If only low val is set, choose low.
-                    choose_low = true;
-                } else if (low == 0) {
-                    // If only high val is set, choose high.
-                    choose_low = false;
-                } else {
-                    // If both are set, choose random.
-                    choose_low = this->GenerateRandomBit();
-                }
-
-                // If we chose low, proceed with low.
-                if (choose_low) {
-                    bitmap = low;
-                    selected += 0;
-                } else {
+                // Choose high if we have high and (don't have low or select high randomly).
+                if (high && (low == 0 || this->GenerateRandomBit())) {
                     bitmap = high;
                     selected += cur_num_bits;
+                } else {
+                    bitmap = low;
+                    selected += 0;
                 }
-
-                // Proceed.
-                cur_num_bits /= 2;
-                cur_mask >>= cur_num_bits;
             }

             return selected;
         }

+        u64 GenerateRandom(u64 max) {
+            // Determine the number of bits we need.
+            const u64 bits_needed = 1 + (Common::BitSize<decltype(max)>() - std::countl_zero(max));
+
+            // Generate a random value of the desired bitwidth.
+            const u64 rnd = this->GenerateRandomBits(static_cast<u32>(bits_needed));
+
+            // Adjust the value to be in range.
+            return rnd - ((rnd / max) * max);
+        }
+
+    private:
+        void RefreshEntropy() {
+            m_entropy = m_rng.GenerateRandomU32();
+            m_bits_available = static_cast<u32>(Common::BitSize<decltype(m_entropy)>());
+        }
+
+        bool GenerateRandomBit() {
+            if (m_bits_available == 0) {
+                this->RefreshEntropy();
+            }
+
+            const bool rnd_bit = (m_entropy & 1) != 0;
+            m_entropy >>= 1;
+            --m_bits_available;
+            return rnd_bit;
+        }
+
+        u64 GenerateRandomBits(u32 num_bits) {
+            u64 result = 0;
+
+            // Iteratively add random bits to our result.
+            while (num_bits > 0) {
+                // Ensure we have random bits to take from.
+                if (m_bits_available == 0) {
+                    this->RefreshEntropy();
+                }
+
+                // Determine how many bits to take this round.
+                const auto cur_bits = std::min(num_bits, m_bits_available);
+
+                // Generate mask for our current bits.
+                const u64 mask = (static_cast<u64>(1) << cur_bits) - 1;
+
+                // Add bits to output from our entropy.
+                result <<= cur_bits;
+                result |= (m_entropy & mask);
+
+                // Remove bits from our entropy.
+                m_entropy >>= cur_bits;
+                m_bits_available -= cur_bits;
+
+                // Advance.
+                num_bits -= cur_bits;
+            }
+
+            return result;
+        }
+
+    private:
+        Common::TinyMT m_rng;
+        u32 m_entropy{};
+        u32 m_bits_available{};
     };

 public:
-    static constexpr std::size_t MaxDepth = 4;
-
-private:
-    std::array<u64*, MaxDepth> bit_storages{};
-    RandomBitGenerator rng{};
-    std::size_t num_bits{};
-    std::size_t used_depths{};
+    static constexpr size_t MaxDepth = 4;

 public:
     KPageBitmap() = default;

-    constexpr std::size_t GetNumBits() const {
-        return num_bits;
+    constexpr size_t GetNumBits() const {
+        return m_num_bits;
     }
     constexpr s32 GetHighestDepthIndex() const {
-        return static_cast<s32>(used_depths) - 1;
+        return static_cast<s32>(m_used_depths) - 1;
     }

-    u64* Initialize(u64* storage, std::size_t size) {
+    u64* Initialize(u64* storage, size_t size) {
         // Initially, everything is un-set.
-        num_bits = 0;
+        m_num_bits = 0;

         // Calculate the needed bitmap depth.
-        used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
-        ASSERT(used_depths <= MaxDepth);
+        m_used_depths = static_cast<size_t>(GetRequiredDepth(size));
+        ASSERT(m_used_depths <= MaxDepth);

         // Set the bitmap pointers.
         for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
-            bit_storages[depth] = storage;
+            m_bit_storages[depth] = storage;
             size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>();
             storage += size;
+            m_end_storages[depth] = storage;
         }

         return storage;
@@ -128,19 +147,19 @@ public:

         if (random) {
             do {
-                const u64 v = bit_storages[depth][offset];
+                const u64 v = m_bit_storages[depth][offset];
                 if (v == 0) {
                     // If depth is bigger than zero, then a previous level indicated a block was
                     // free.
                     ASSERT(depth == 0);
                     return -1;
                 }
-                offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v);
+                offset = offset * Common::BitSize<u64>() + m_rng.SelectRandomBit(v);
                 ++depth;
-            } while (depth < static_cast<s32>(used_depths));
+            } while (depth < static_cast<s32>(m_used_depths));
         } else {
             do {
-                const u64 v = bit_storages[depth][offset];
+                const u64 v = m_bit_storages[depth][offset];
                 if (v == 0) {
                     // If depth is bigger than zero, then a previous level indicated a block was
                     // free.
@@ -149,28 +168,69 @@ public:
                 }
                 offset = offset * Common::BitSize<u64>() + std::countr_zero(v);
                 ++depth;
-            } while (depth < static_cast<s32>(used_depths));
+            } while (depth < static_cast<s32>(m_used_depths));
         }

         return static_cast<s64>(offset);
     }

-    void SetBit(std::size_t offset) {
+    s64 FindFreeRange(size_t count) {
+        // Check that it is possible to find a range.
+        const u64* const storage_start = m_bit_storages[m_used_depths - 1];
+        const u64* const storage_end = m_end_storages[m_used_depths - 1];
+
+        // If we don't have a storage to iterate (or want more blocks than fit in a single storage),
+        // we can't find a free range.
+        if (!(storage_start < storage_end && count <= Common::BitSize<u64>())) {
+            return -1;
+        }
+
+        // Walk the storages to select a random free range.
+        const size_t options_per_storage = std::max<size_t>(Common::BitSize<u64>() / count, 1);
+        const size_t num_entries = std::max<size_t>(storage_end - storage_start, 1);
+
+        const u64 free_mask = (static_cast<u64>(1) << count) - 1;
+
+        size_t num_valid_options = 0;
+        s64 chosen_offset = -1;
+        for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) {
+            u64 storage = storage_start[storage_index];
+            for (size_t option = 0; option < options_per_storage; ++option) {
+                if ((storage & free_mask) == free_mask) {
+                    // We've found a new valid option.
+                    ++num_valid_options;
+
+                    // Select the Kth valid option with probability 1/K. This leads to an overall
+                    // uniform distribution.
+                    if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) {
+                        // This is our first option, so select it.
+                        chosen_offset = storage_index * Common::BitSize<u64>() + option * count;
+                    }
+                }
+                storage >>= count;
+            }
+        }
+
+        // Return the random offset we chose.
+        return chosen_offset;
+    }
+
+    void SetBit(size_t offset) {
         this->SetBit(this->GetHighestDepthIndex(), offset);
-        num_bits++;
+        m_num_bits++;
     }

-    void ClearBit(std::size_t offset) {
+    void ClearBit(size_t offset) {
         this->ClearBit(this->GetHighestDepthIndex(), offset);
-        num_bits--;
+        m_num_bits--;
     }

-    bool ClearRange(std::size_t offset, std::size_t count) {
+    bool ClearRange(size_t offset, size_t count) {
         s32 depth = this->GetHighestDepthIndex();
-        u64* bits = bit_storages[depth];
-        std::size_t bit_ind = offset / Common::BitSize<u64>();
-        if (count < Common::BitSize<u64>()) {
-            const std::size_t shift = offset % Common::BitSize<u64>();
+        u64* bits = m_bit_storages[depth];
+        size_t bit_ind = offset / Common::BitSize<u64>();
+        if (count < Common::BitSize<u64>()) [[likely]] {
+            const size_t shift = offset % Common::BitSize<u64>();
             ASSERT(shift + count <= Common::BitSize<u64>());
             // Check that all the bits are set.
             const u64 mask = ((u64(1) << count) - 1) << shift;
@@ -189,8 +249,8 @@ public:
             ASSERT(offset % Common::BitSize<u64>() == 0);
             ASSERT(count % Common::BitSize<u64>() == 0);
             // Check that all the bits are set.
-            std::size_t remaining = count;
-            std::size_t i = 0;
+            size_t remaining = count;
+            size_t i = 0;
             do {
                 if (bits[bit_ind + i++] != ~u64(0)) {
                     return false;
@@ -209,18 +269,18 @@ public:
             } while (remaining > 0);
         }

-        num_bits -= count;
+        m_num_bits -= count;
         return true;
     }

 private:
-    void SetBit(s32 depth, std::size_t offset) {
+    void SetBit(s32 depth, size_t offset) {
         while (depth >= 0) {
-            std::size_t ind = offset / Common::BitSize<u64>();
-            std::size_t which = offset % Common::BitSize<u64>();
+            size_t ind = offset / Common::BitSize<u64>();
+            size_t which = offset % Common::BitSize<u64>();
             const u64 mask = u64(1) << which;

-            u64* bit = std::addressof(bit_storages[depth][ind]);
+            u64* bit = std::addressof(m_bit_storages[depth][ind]);
             u64 v = *bit;
             ASSERT((v & mask) == 0);
             *bit = v | mask;
@@ -232,13 +292,13 @@ private:
         }
     }

-    void ClearBit(s32 depth, std::size_t offset) {
+    void ClearBit(s32 depth, size_t offset) {
         while (depth >= 0) {
-            std::size_t ind = offset / Common::BitSize<u64>();
-            std::size_t which = offset % Common::BitSize<u64>();
+            size_t ind = offset / Common::BitSize<u64>();
+            size_t which = offset % Common::BitSize<u64>();
             const u64 mask = u64(1) << which;

-            u64* bit = std::addressof(bit_storages[depth][ind]);
+            u64* bit = std::addressof(m_bit_storages[depth][ind]);
             u64 v = *bit;
             ASSERT((v & mask) != 0);
             v &= ~mask;
@@ -252,7 +312,7 @@ private:
     }

 private:
-    static constexpr s32 GetRequiredDepth(std::size_t region_size) {
+    static constexpr s32 GetRequiredDepth(size_t region_size) {
         s32 depth = 0;
         while (true) {
             region_size /= Common::BitSize<u64>();
@@ -264,8 +324,8 @@ private:
     }

 public:
-    static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
-        std::size_t overhead_bits = 0;
+    static constexpr size_t CalculateManagementOverheadSize(size_t region_size) {
+        size_t overhead_bits = 0;
         for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
             region_size =
                 Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>();
@@ -273,6 +333,13 @@ public:
         }
         return overhead_bits * sizeof(u64);
     }
+
+private:
+    std::array<u64*, MaxDepth> m_bit_storages{};
+    std::array<u64*, MaxDepth> m_end_storages{};
+    RandomBitGenerator m_rng;
+    size_t m_num_bits{};
+    size_t m_used_depths{};
 };

 } // namespace Kernel

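The new FindFreeRange above selects uniformly among all free slots in a single pass: the Kth candidate replaces the current choice with probability 1/K, which is reservoir sampling with a reservoir of one. A hedged, self-contained sketch of the same technique (PickUniform is hypothetical and uses the standard library rather than the kernel's TinyMT-based generator):

#include <cstdint>
#include <random>
#include <vector>

// Reservoir sampling with reservoir size 1. After K candidates, each has been
// kept with probability exactly 1/K: candidate i is chosen with probability 1/i
// and survives every later round with probability (1 - 1/j), and the product
// (1/i) * prod_{j=i+1..K} (j-1)/j telescopes to 1/K.
std::int64_t PickUniform(const std::vector<std::int64_t>& candidates, std::mt19937_64& rng) {
    std::int64_t chosen = -1;
    std::uint64_t num_seen = 0;
    for (const std::int64_t candidate : candidates) {
        ++num_seen;
        // Replace the current choice with probability 1/num_seen.
        if (std::uniform_int_distribution<std::uint64_t>(0, num_seen - 1)(rng) == 0) {
            chosen = candidate;
        }
    }
    return chosen;
}

This is why the one-pass scan over bitmap storages needs no second pass or up-front count of free ranges to stay uniform.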
@@ -12,7 +12,7 @@ namespace Kernel {

 KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
     ASSERT(Common::IsAligned(phys_addr, PageSize));
-    return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
+    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
 }

 } // namespace Kernel

@@ -11,8 +11,19 @@

 namespace Kernel {

+class KernelCore;
+
+class KPageBufferSlabHeap : protected impl::KSlabHeapImpl {
+public:
+    static constexpr size_t BufferSize = PageSize;
+
+public:
+    void Initialize(Core::System& system);
+};
+
 class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
 public:
-    explicit KPageBuffer(KernelCore&) {}
+    KPageBuffer() = default;

     static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
@@ -20,8 +31,6 @@ public:
 private:
     [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};
 };

-static_assert(sizeof(KPageBuffer) == PageSize);
-static_assert(alignof(KPageBuffer) == PageSize);
+static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);

 } // namespace Kernel

@@ -5,6 +5,7 @@

#include <list>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory_types.h"
@@ -12,6 +13,89 @@

namespace Kernel {

class KPageGroup;

class KBlockInfo {
private:
    friend class KPageGroup;

public:
    constexpr KBlockInfo() = default;

    constexpr void Initialize(PAddr addr, size_t np) {
        ASSERT(Common::IsAligned(addr, PageSize));
        ASSERT(static_cast<u32>(np) == np);

        m_page_index = static_cast<u32>(addr) / PageSize;
        m_num_pages = static_cast<u32>(np);
    }

    constexpr PAddr GetAddress() const {
        return m_page_index * PageSize;
    }
    constexpr size_t GetNumPages() const {
        return m_num_pages;
    }
    constexpr size_t GetSize() const {
        return this->GetNumPages() * PageSize;
    }
    constexpr PAddr GetEndAddress() const {
        return (m_page_index + m_num_pages) * PageSize;
    }
    constexpr PAddr GetLastAddress() const {
        return this->GetEndAddress() - 1;
    }

    constexpr KBlockInfo* GetNext() const {
        return m_next;
    }

    constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const {
        return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages;
    }

    constexpr bool operator==(const KBlockInfo& rhs) const {
        return this->IsEquivalentTo(rhs);
    }

    constexpr bool operator!=(const KBlockInfo& rhs) const {
        return !(*this == rhs);
    }

    constexpr bool IsStrictlyBefore(PAddr addr) const {
        const PAddr end = this->GetEndAddress();

        if (m_page_index != 0 && end == 0) {
            return false;
        }

        return end < addr;
    }

    constexpr bool operator<(PAddr addr) const {
        return this->IsStrictlyBefore(addr);
    }

    constexpr bool TryConcatenate(PAddr addr, size_t np) {
        if (addr != 0 && addr == this->GetEndAddress()) {
            m_num_pages += static_cast<u32>(np);
            return true;
        }
        return false;
    }

private:
    constexpr void SetNext(KBlockInfo* next) {
        m_next = next;
    }

private:
    KBlockInfo* m_next{};
    u32 m_page_index{};
    u32 m_num_pages{};
};
static_assert(sizeof(KBlockInfo) <= 0x10);
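A hedged usage sketch of the class above: KBlockInfo packs a physical run into two u32s (first page index plus page count), and TryConcatenate grows the run in place when a new range begins exactly at GetEndAddress(). The addresses below are illustrative:

// Assumes PageSize == 0x1000, as in the kernel headers.
KBlockInfo info{};
info.Initialize(0x100000, 4);  // covers pages [0x100000, 0x104000)
const bool merged = info.TryConcatenate(0x104000, 2);
// merged == true; the block now spans 6 pages and GetSize() == 6 * PageSize.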

class KPageGroup final {
public:
    class Node final {
@@ -92,6 +176,8 @@ public:
        return nodes.empty();
    }

    void Finalize() {}

private:
    std::list<Node> nodes;
};
@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
    return num_free;
}

PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
    const size_t needed_size = m_blocks[index].GetSize();

    for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
        if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
        if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
            if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
            }
@@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
    return 0;
}
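Condensed, the linear path pops a block from the first non-empty size class at or above the request and refiles the tail it does not need. A self-contained toy of that split-and-free step (class sizes and free lists here are illustrative scaffolding, not the real Block machinery):

#include <array>
#include <cstdint>
#include <vector>

constexpr uint64_t kPage = 0x1000;
constexpr std::array<uint64_t, 3> kClassSize = {kPage, 16 * kPage, 512 * kPage};
std::array<std::vector<uint64_t>, 3> g_free_lists;  // free block offsets per class

uint64_t AllocateFirstFit(int index) {
    const uint64_t needed = kClassSize[index];
    for (int i = index; i < 3; ++i) {
        if (g_free_lists[i].empty()) {
            continue;  // nothing free at this size class, try a larger one
        }
        const uint64_t addr = g_free_lists[i].back();
        g_free_lists[i].pop_back();
        // Refile the unused tail of an oversized block as single pages.
        for (uint64_t off = needed; off < kClassSize[i]; off += kPage) {
            g_free_lists[0].push_back(addr + off);
        }
        return addr;
    }
    return 0;  // allocation failed
}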

PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
    // Get the size and required alignment.
    const size_t needed_size = num_pages * PageSize;
    const size_t align_size = align_pages * PageSize;

    // Determine meta-alignment of our desired alignment size.
    const size_t align_shift = std::countr_zero(align_size);

    // Decide on a block to allocate from.
    constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
    {
        // By default, we'll want to look at all blocks larger than our current one.
        s32 max_blocks = static_cast<s32>(m_num_blocks);

        // Determine the maximum block we should try to allocate from.
        size_t possible_alignments = 0;
        for (s32 i = index; i < max_blocks; ++i) {
            // Add the possible alignments from blocks at the current size.
            possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
                                   m_blocks[i].GetNumFreeBlocks();

            // If there are enough possible alignments, we don't need to look at larger blocks.
            if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
                max_blocks = i + 1;
                break;
            }
        }

        // If we have any possible alignments which require a larger block, we need to pick one.
        if (possible_alignments > 0 && index + 1 < max_blocks) {
            // Select a random alignment from the possibilities.
            const size_t rnd = m_rng.GenerateRandom(possible_alignments);

            // Determine which block corresponds to the random alignment we chose.
            possible_alignments = 0;
            for (s32 i = index; i < max_blocks; ++i) {
                // Add the possible alignments from blocks at the current size.
                possible_alignments +=
                    (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
                    m_blocks[i].GetNumFreeBlocks();

                // If the current block gets us to our random choice, use the current block.
                if (rnd < possible_alignments) {
                    index = i;
                    break;
                }
            }
        }
    }

    // Pop a block from the index we selected.
    if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
        // Determine how much size we have left over.
        if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
            leftover_size > 0) {
            // Determine how many valid alignments we can have.
            const size_t possible_alignments = 1 + (leftover_size >> align_shift);

            // Select a random valid alignment.
            const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;

            // Free memory before the random offset.
            if (random_offset != 0) {
                this->Free(addr, random_offset / PageSize);
            }

            // Advance our block by the random offset.
            addr += random_offset;

            // Free memory after our allocated block.
            if (random_offset != leftover_size) {
                this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
            }
        }

        // Return the block we allocated.
        return addr;
    }

    return 0;
}
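The placement count that appears twice above is worth restating: a free block of block_size bytes admits 1 + (block_size - needed_size) / align_size starting positions for an align_size-aligned allocation. A checked restatement (sizes assumed to be power-of-two multiples of the page size):

#include <bit>
#include <cstddef>

constexpr std::size_t PossibleAlignments(std::size_t block_size, std::size_t needed_size,
                                         std::size_t align_size) {
    return 1 + ((block_size - needed_size) >> std::countr_zero(align_size));
}
// Example: a 2 MiB block with a 64 KiB request at 64 KiB alignment:
static_assert(PossibleAlignments(0x200000, 0x10000, 0x10000) == 32);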

void KPageHeap::FreeBlock(PAddr block, s32 index) {
    do {
        block = m_blocks[index++].PushBlock(block);

@@ -14,13 +14,9 @@

namespace Kernel {

class KPageHeap final {
class KPageHeap {
public:
    YUZU_NON_COPYABLE(KPageHeap);
    YUZU_NON_MOVEABLE(KPageHeap);

    KPageHeap() = default;
    ~KPageHeap() = default;

    constexpr PAddr GetAddress() const {
        return m_heap_address;
@@ -57,7 +53,20 @@ public:
        m_initial_used_size = m_heap_size - free_size - reserved_size;
    }

    PAddr AllocateBlock(s32 index, bool random);
    PAddr AllocateBlock(s32 index, bool random) {
        if (random) {
            const size_t block_pages = m_blocks[index].GetNumPages();
            return this->AllocateByRandom(index, block_pages, block_pages);
        } else {
            return this->AllocateByLinearSearch(index);
        }
    }

    PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
        // TODO: linear search support?
        return this->AllocateByRandom(index, num_pages, align_pages);
    }

    void Free(PAddr addr, size_t num_pages);

    static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -68,7 +77,7 @@ public:
    static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
        const size_t target_pages = std::max(num_pages, align_pages);
        for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
            if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
            if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return static_cast<s32>(i);
            }
        }
@@ -77,7 +86,7 @@ public:

    static constexpr s32 GetBlockIndex(size_t num_pages) {
        for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
            if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
            if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return i;
            }
        }
@@ -85,7 +94,7 @@ public:
    }

    static constexpr size_t GetBlockSize(size_t index) {
        return size_t(1) << MemoryBlockPageShifts[index];
        return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
    }

    static constexpr size_t GetBlockNumPages(size_t index) {
@@ -93,13 +102,9 @@ public:
    }

private:
    class Block final {
    class Block {
    public:
        YUZU_NON_COPYABLE(Block);
        YUZU_NON_MOVEABLE(Block);

        Block() = default;
        ~Block() = default;

        constexpr size_t GetShift() const {
            return m_block_shift;
@@ -201,6 +206,9 @@ private:
    };

private:
    PAddr AllocateByLinearSearch(s32 index);
    PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);

    static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
                                                  size_t num_block_shifts);

@@ -209,7 +217,8 @@ private:
    size_t m_heap_size{};
    size_t m_initial_used_size{};
    size_t m_num_blocks{};
    std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
    std::array<Block, NumMemoryBlockPageShifts> m_blocks;
    KPageBitmap::RandomBitGenerator m_rng;
    std::vector<u64> m_management_data;
};
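A sketch of the size-class selection GetBlockIndex performs, with an illustrative three-entry shift table (the real MemoryBlockPageShifts is defined elsewhere in the kernel sources): it returns the largest block class whose page count still fits inside the request.

#include <cstddef>

constexpr std::size_t kPageSize = 0x1000;
constexpr std::size_t kShifts[] = {12, 16, 21};  // 4 KiB, 64 KiB, 2 MiB blocks

constexpr int BlockIndexFor(std::size_t num_pages) {
    for (int i = 2; i >= 0; i--) {
        if (num_pages >= (std::size_t(1) << kShifts[i]) / kPageSize) {
            return i;  // largest class not exceeding the request
        }
    }
    return -1;
}
static_assert(BlockIndexFor(16) == 1);   // 16 pages = 64 KiB
static_assert(BlockIndexFor(512) == 2);  // 512 pages = 2 MiB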
File diff suppressed because it is too large
@@ -9,11 +9,14 @@
|
||||
#include "common/common_types.h"
|
||||
#include "common/page_table.h"
|
||||
#include "core/file_sys/program_metadata.h"
|
||||
#include "core/hle/kernel/k_dynamic_resource_manager.h"
|
||||
#include "core/hle/kernel/k_light_lock.h"
|
||||
#include "core/hle/kernel/k_memory_block.h"
|
||||
#include "core/hle/kernel/k_memory_block_manager.h"
|
||||
#include "core/hle/kernel/k_memory_layout.h"
|
||||
#include "core/hle/kernel/k_memory_manager.h"
|
||||
#include "core/hle/result.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
@@ -21,7 +24,10 @@ class System;
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KBlockInfoManager;
|
||||
class KMemoryBlockManager;
|
||||
class KResourceLimit;
|
||||
class KSystemResource;
|
||||
|
||||
class KPageTable final {
|
||||
public:
|
||||
@@ -34,129 +40,185 @@ public:
|
||||
~KPageTable();
|
||||
|
||||
Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
|
||||
VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
|
||||
Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
|
||||
bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
|
||||
VAddr code_addr, size_t code_size, KSystemResource* system_resource,
|
||||
KResourceLimit* resource_limit);
|
||||
|
||||
void Finalize();
|
||||
|
||||
Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
|
||||
KMemoryPermission perm);
|
||||
Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
|
||||
Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
|
||||
Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
|
||||
Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
|
||||
ICacheInvalidationStrategy icache_invalidation_strategy);
|
||||
Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
|
||||
Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
|
||||
VAddr src_addr);
|
||||
Result MapPhysicalMemory(VAddr addr, std::size_t size);
|
||||
Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
|
||||
Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
Result MapPhysicalMemory(VAddr addr, size_t size);
|
||||
Result UnmapPhysicalMemory(VAddr addr, size_t size);
|
||||
Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
|
||||
Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
|
||||
Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
|
||||
KMemoryPermission perm);
|
||||
Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
|
||||
Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
|
||||
KMemoryState state, KMemoryPermission perm) {
|
||||
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
|
||||
this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
|
||||
state, perm);
|
||||
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
|
||||
this->GetRegionAddress(state),
|
||||
this->GetRegionSize(state) / PageSize, state, perm));
|
||||
}
|
||||
Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
|
||||
Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
|
||||
Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
|
||||
Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
|
||||
Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
|
||||
KMemoryInfo QueryInfo(VAddr addr);
|
||||
Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
|
||||
Result ResetTransferMemory(VAddr addr, std::size_t size);
|
||||
Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
|
||||
Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
|
||||
Result SetMaxHeapSize(std::size_t size);
|
||||
Result SetHeapSize(VAddr* out, std::size_t size);
|
||||
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
|
||||
bool is_map_only, VAddr region_start,
|
||||
std::size_t region_num_pages, KMemoryState state,
|
||||
KMemoryPermission perm, PAddr map_addr = 0);
|
||||
Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
|
||||
Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
|
||||
Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
|
||||
Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
|
||||
Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
|
||||
Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
|
||||
Result SetMaxHeapSize(size_t size);
|
||||
Result SetHeapSize(VAddr* out, size_t size);
|
||||
ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
|
||||
VAddr region_start, size_t region_num_pages,
|
||||
KMemoryState state, KMemoryPermission perm,
|
||||
PAddr map_addr = 0);
|
||||
|
||||
Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
|
||||
KMemoryPermission perm, bool is_aligned, bool check_heap);
|
||||
Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap);
|
||||
|
||||
Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
|
||||
|
||||
Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
|
||||
Result UnlockForIpcUserBuffer(VAddr address, size_t size);
|
||||
|
||||
Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
|
||||
KMemoryPermission test_perm, KMemoryState dst_state, bool send);
|
||||
Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
|
||||
Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
|
||||
|
||||
Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
|
||||
Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
|
||||
Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
|
||||
KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr);
|
||||
|
||||
Common::PageTable& PageTableImpl() {
|
||||
return page_table_impl;
|
||||
return *m_page_table_impl;
|
||||
}
|
||||
|
||||
const Common::PageTable& PageTableImpl() const {
|
||||
return page_table_impl;
|
||||
return *m_page_table_impl;
|
||||
}
|
||||
|
||||
bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
|
||||
bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
|
||||
|
||||
protected:
|
||||
struct PageLinkedList {
|
||||
private:
|
||||
struct Node {
|
||||
Node* m_next;
|
||||
std::array<u8, PageSize - sizeof(Node*)> m_buffer;
|
||||
};
|
||||
|
||||
public:
|
||||
constexpr PageLinkedList() = default;
|
||||
|
||||
void Push(Node* n) {
|
||||
ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
|
||||
n->m_next = m_root;
|
||||
m_root = n;
|
||||
}
|
||||
|
||||
void Push(Core::Memory::Memory& memory, VAddr addr) {
|
||||
this->Push(memory.GetPointer<Node>(addr));
|
||||
}
|
||||
|
||||
Node* Peek() const {
|
||||
return m_root;
|
||||
}
|
||||
|
||||
Node* Pop() {
|
||||
Node* const r = m_root;
|
||||
|
||||
m_root = r->m_next;
|
||||
r->m_next = nullptr;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
private:
|
||||
Node* m_root{};
|
||||
};
|
||||
static_assert(std::is_trivially_destructible<PageLinkedList>::value);
|
||||
|
||||
private:
    enum class OperationType : u32 {
        Map,
        MapGroup,
        Unmap,
        ChangePermissions,
        ChangePermissionsAndRefresh,
        Map = 0,
        MapFirst = 1,
        MapGroup = 2,
        Unmap = 3,
        ChangePermissions = 4,
        ChangePermissionsAndRefresh = 5,
        Separate = 6,
    };
|
||||
|
||||
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
|
||||
KMemoryAttribute::IpcLocked |
|
||||
KMemoryAttribute::DeviceShared;
|
||||
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
|
||||
KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
|
||||
|
||||
Result InitializeMemoryLayout(VAddr start, VAddr end);
|
||||
Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
|
||||
Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
|
||||
bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
|
||||
Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
|
||||
bool is_pa_valid, VAddr region_start, size_t region_num_pages,
|
||||
KMemoryState state, KMemoryPermission perm);
|
||||
Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
|
||||
bool IsRegionMapped(VAddr address, u64 size);
|
||||
bool IsRegionContiguous(VAddr addr, u64 size) const;
|
||||
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
|
||||
void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
|
||||
KMemoryInfo QueryInfoImpl(VAddr addr);
|
||||
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
|
||||
std::size_t align);
|
||||
Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
|
||||
VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
|
||||
size_t align);
|
||||
Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
|
||||
OperationType operation);
|
||||
Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
|
||||
OperationType operation, PAddr map_addr = 0);
|
||||
Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
|
||||
PAddr map_addr = 0);
|
||||
void FinalizeUpdate(PageLinkedList* page_list);
|
||||
VAddr GetRegionAddress(KMemoryState state) const;
|
||||
std::size_t GetRegionSize(KMemoryState state) const;
|
||||
size_t GetRegionSize(KMemoryState state) const;
|
||||
|
||||
VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
|
||||
std::size_t alignment, std::size_t offset, std::size_t guard_pages);
|
||||
VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
|
||||
size_t alignment, size_t offset, size_t guard_pages);
|
||||
|
||||
Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
|
||||
Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
|
||||
KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
|
||||
Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
|
||||
Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr) const {
|
||||
return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
|
||||
perm, attr_mask, attr);
|
||||
R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
|
||||
perm, attr_mask, attr));
|
||||
}
|
||||
|
||||
Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
|
||||
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
|
||||
KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
|
||||
std::size_t size, KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
|
||||
size_t size, KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
|
||||
Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
|
||||
Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
|
||||
KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
|
||||
return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
|
||||
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
|
||||
R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
|
||||
state_mask, state, perm_mask, perm, attr_mask, attr,
|
||||
ignore_attr));
|
||||
}
|
||||
Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
|
||||
return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
|
||||
attr_mask, attr, ignore_attr);
|
||||
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
|
||||
attr_mask, attr, ignore_attr));
|
||||
}
|
||||
|
||||
Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
|
||||
@@ -174,13 +236,13 @@ private:
|
||||
bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
|
||||
|
||||
bool IsLockedByCurrentThread() const {
|
||||
return general_lock.IsLockedByCurrentThread();
|
||||
return m_general_lock.IsLockedByCurrentThread();
|
||||
}
|
||||
|
||||
bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
|
||||
ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
|
||||
return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
|
||||
}
|
||||
|
||||
bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
|
||||
@@ -191,95 +253,105 @@ private:
|
||||
return *out != 0;
|
||||
}
|
||||
|
||||
mutable KLightLock general_lock;
|
||||
mutable KLightLock map_physical_memory_lock;
|
||||
Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
|
||||
size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
|
||||
Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
|
||||
KMemoryPermission test_perm, KMemoryState dst_state,
|
||||
KPageTable& src_page_table, bool send);
|
||||
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
|
||||
size_t size, KMemoryPermission prot_perm);
|
||||
|
||||
std::unique_ptr<KMemoryBlockManager> block_manager;
|
||||
// HACK: These will be removed once we automatically manage page reference counts.
|
||||
void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
|
||||
void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
|
||||
|
||||
mutable KLightLock m_general_lock;
|
||||
mutable KLightLock m_map_physical_memory_lock;
|
||||
|
||||
public:
|
||||
constexpr VAddr GetAddressSpaceStart() const {
|
||||
return address_space_start;
|
||||
return m_address_space_start;
|
||||
}
|
||||
constexpr VAddr GetAddressSpaceEnd() const {
|
||||
return address_space_end;
|
||||
return m_address_space_end;
|
||||
}
|
||||
constexpr std::size_t GetAddressSpaceSize() const {
|
||||
return address_space_end - address_space_start;
|
||||
constexpr size_t GetAddressSpaceSize() const {
|
||||
return m_address_space_end - m_address_space_start;
|
||||
}
|
||||
constexpr VAddr GetHeapRegionStart() const {
|
||||
return heap_region_start;
|
||||
return m_heap_region_start;
|
||||
}
|
||||
constexpr VAddr GetHeapRegionEnd() const {
|
||||
return heap_region_end;
|
||||
return m_heap_region_end;
|
||||
}
|
||||
constexpr std::size_t GetHeapRegionSize() const {
|
||||
return heap_region_end - heap_region_start;
|
||||
constexpr size_t GetHeapRegionSize() const {
|
||||
return m_heap_region_end - m_heap_region_start;
|
||||
}
|
||||
constexpr VAddr GetAliasRegionStart() const {
|
||||
return alias_region_start;
|
||||
return m_alias_region_start;
|
||||
}
|
||||
constexpr VAddr GetAliasRegionEnd() const {
|
||||
return alias_region_end;
|
||||
return m_alias_region_end;
|
||||
}
|
||||
constexpr std::size_t GetAliasRegionSize() const {
|
||||
return alias_region_end - alias_region_start;
|
||||
constexpr size_t GetAliasRegionSize() const {
|
||||
return m_alias_region_end - m_alias_region_start;
|
||||
}
|
||||
constexpr VAddr GetStackRegionStart() const {
|
||||
return stack_region_start;
|
||||
return m_stack_region_start;
|
||||
}
|
||||
constexpr VAddr GetStackRegionEnd() const {
|
||||
return stack_region_end;
|
||||
return m_stack_region_end;
|
||||
}
|
||||
constexpr std::size_t GetStackRegionSize() const {
|
||||
return stack_region_end - stack_region_start;
|
||||
constexpr size_t GetStackRegionSize() const {
|
||||
return m_stack_region_end - m_stack_region_start;
|
||||
}
|
||||
constexpr VAddr GetKernelMapRegionStart() const {
|
||||
return kernel_map_region_start;
|
||||
return m_kernel_map_region_start;
|
||||
}
|
||||
constexpr VAddr GetKernelMapRegionEnd() const {
|
||||
return kernel_map_region_end;
|
||||
return m_kernel_map_region_end;
|
||||
}
|
||||
constexpr VAddr GetCodeRegionStart() const {
|
||||
return code_region_start;
|
||||
return m_code_region_start;
|
||||
}
|
||||
constexpr VAddr GetCodeRegionEnd() const {
|
||||
return code_region_end;
|
||||
return m_code_region_end;
|
||||
}
|
||||
constexpr VAddr GetAliasCodeRegionStart() const {
|
||||
return alias_code_region_start;
|
||||
return m_alias_code_region_start;
|
||||
}
|
||||
constexpr VAddr GetAliasCodeRegionSize() const {
|
||||
return alias_code_region_end - alias_code_region_start;
|
||||
return m_alias_code_region_end - m_alias_code_region_start;
|
||||
}
|
||||
std::size_t GetNormalMemorySize() {
|
||||
KScopedLightLock lk(general_lock);
|
||||
return GetHeapSize() + mapped_physical_memory_size;
|
||||
size_t GetNormalMemorySize() {
|
||||
KScopedLightLock lk(m_general_lock);
|
||||
return GetHeapSize() + m_mapped_physical_memory_size;
|
||||
}
|
||||
constexpr std::size_t GetAddressSpaceWidth() const {
|
||||
return address_space_width;
|
||||
constexpr size_t GetAddressSpaceWidth() const {
|
||||
return m_address_space_width;
|
||||
}
|
||||
constexpr std::size_t GetHeapSize() const {
|
||||
return current_heap_end - heap_region_start;
|
||||
constexpr size_t GetHeapSize() const {
|
||||
return m_current_heap_end - m_heap_region_start;
|
||||
}
|
||||
constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
|
||||
return address_space_start <= address && address + size - 1 <= address_space_end - 1;
|
||||
constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
|
||||
return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
|
||||
}
|
||||
constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
|
||||
return alias_region_start > address || address + size - 1 > alias_region_end - 1;
|
||||
constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
|
||||
return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
|
||||
}
|
||||
constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
|
||||
return stack_region_start > address || address + size - 1 > stack_region_end - 1;
|
||||
constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
|
||||
return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
|
||||
}
|
||||
constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
|
||||
constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
|
||||
return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
|
||||
}
|
||||
constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
|
||||
return address + size > heap_region_start && heap_region_end > address;
|
||||
constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
|
||||
return address + size > m_heap_region_start && m_heap_region_end > address;
|
||||
}
|
||||
constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
|
||||
return address + size > alias_region_start && alias_region_end > address;
|
||||
constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
|
||||
return address + size > m_alias_region_start && m_alias_region_end > address;
|
||||
}
|
||||
constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
|
||||
constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
|
||||
if (IsInvalidRegion(address, size)) {
|
||||
return true;
|
||||
}
|
||||
@@ -291,73 +363,128 @@ public:
|
||||
}
|
||||
return {};
|
||||
}
|
||||
constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
|
||||
constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
|
||||
return !IsOutsideASLRRegion(address, size);
|
||||
}
|
||||
constexpr std::size_t GetNumGuardPages() const {
|
||||
constexpr size_t GetNumGuardPages() const {
|
||||
return IsKernel() ? 1 : 4;
|
||||
}
|
||||
PAddr GetPhysicalAddr(VAddr addr) const {
|
||||
const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
|
||||
const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
|
||||
ASSERT(backing_addr);
|
||||
return backing_addr + addr;
|
||||
}
|
||||
constexpr bool Contains(VAddr addr) const {
|
||||
return address_space_start <= addr && addr <= address_space_end - 1;
|
||||
return m_address_space_start <= addr && addr <= m_address_space_end - 1;
|
||||
}
|
||||
constexpr bool Contains(VAddr addr, std::size_t size) const {
|
||||
return address_space_start <= addr && addr < addr + size &&
|
||||
addr + size - 1 <= address_space_end - 1;
|
||||
constexpr bool Contains(VAddr addr, size_t size) const {
|
||||
return m_address_space_start <= addr && addr < addr + size &&
|
||||
addr + size - 1 <= m_address_space_end - 1;
|
||||
}
|
||||
|
||||
public:
|
||||
static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
|
||||
return layout.GetLinearVirtualAddress(addr);
|
||||
}
|
||||
|
||||
static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
|
||||
return layout.GetLinearPhysicalAddress(addr);
|
||||
}
|
||||
|
||||
static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
|
||||
return GetLinearMappedVirtualAddress(layout, addr);
|
||||
}
|
||||
|
||||
static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
|
||||
return GetLinearMappedPhysicalAddress(layout, addr);
|
||||
}
|
||||
|
||||
static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
|
||||
return GetLinearMappedVirtualAddress(layout, addr);
|
||||
}
|
||||
|
||||
static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
|
||||
return GetLinearMappedPhysicalAddress(layout, addr);
|
||||
}
|
||||
|
||||
private:
|
||||
constexpr bool IsKernel() const {
|
||||
return is_kernel;
|
||||
return m_is_kernel;
|
||||
}
|
||||
constexpr bool IsAslrEnabled() const {
|
||||
return is_aslr_enabled;
|
||||
return m_enable_aslr;
|
||||
}
|
||||
|
||||
constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
|
||||
return (address_space_start <= addr) &&
|
||||
(num_pages <= (address_space_end - address_space_start) / PageSize) &&
|
||||
(addr + num_pages * PageSize - 1 <= address_space_end - 1);
|
||||
constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
|
||||
return (m_address_space_start <= addr) &&
|
||||
(num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
|
||||
(addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
|
||||
}
|
||||
|
||||
private:
|
||||
VAddr address_space_start{};
|
||||
VAddr address_space_end{};
|
||||
VAddr heap_region_start{};
|
||||
VAddr heap_region_end{};
|
||||
VAddr current_heap_end{};
|
||||
VAddr alias_region_start{};
|
||||
VAddr alias_region_end{};
|
||||
VAddr stack_region_start{};
|
||||
VAddr stack_region_end{};
|
||||
VAddr kernel_map_region_start{};
|
||||
VAddr kernel_map_region_end{};
|
||||
VAddr code_region_start{};
|
||||
VAddr code_region_end{};
|
||||
VAddr alias_code_region_start{};
|
||||
VAddr alias_code_region_end{};
|
||||
class KScopedPageTableUpdater {
|
||||
private:
|
||||
KPageTable* m_pt{};
|
||||
PageLinkedList m_ll;
|
||||
|
||||
std::size_t mapped_physical_memory_size{};
|
||||
std::size_t max_heap_size{};
|
||||
std::size_t max_physical_memory_size{};
|
||||
std::size_t address_space_width{};
|
||||
public:
|
||||
explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
|
||||
explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
|
||||
~KScopedPageTableUpdater() {
|
||||
m_pt->FinalizeUpdate(this->GetPageList());
|
||||
}
|
||||
|
||||
bool is_kernel{};
|
||||
bool is_aslr_enabled{};
|
||||
PageLinkedList* GetPageList() {
|
||||
return &m_ll;
|
||||
}
|
||||
};
|
||||
|
||||
u32 heap_fill_value{};
|
||||
const KMemoryRegion* cached_physical_heap_region{};
|
||||
private:
|
||||
VAddr m_address_space_start{};
|
||||
VAddr m_address_space_end{};
|
||||
VAddr m_heap_region_start{};
|
||||
VAddr m_heap_region_end{};
|
||||
VAddr m_current_heap_end{};
|
||||
VAddr m_alias_region_start{};
|
||||
VAddr m_alias_region_end{};
|
||||
VAddr m_stack_region_start{};
|
||||
VAddr m_stack_region_end{};
|
||||
VAddr m_kernel_map_region_start{};
|
||||
VAddr m_kernel_map_region_end{};
|
||||
VAddr m_code_region_start{};
|
||||
VAddr m_code_region_end{};
|
||||
VAddr m_alias_code_region_start{};
|
||||
VAddr m_alias_code_region_end{};
|
||||
|
||||
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
|
||||
KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
|
||||
size_t m_max_heap_size{};
|
||||
size_t m_mapped_physical_memory_size{};
|
||||
size_t m_mapped_unsafe_physical_memory{};
|
||||
size_t m_mapped_insecure_memory{};
|
||||
size_t m_mapped_ipc_server_memory{};
|
||||
size_t m_address_space_width{};
|
||||
|
||||
Common::PageTable page_table_impl;
|
||||
KMemoryBlockManager m_memory_block_manager;
|
||||
u32 m_allocate_option{};
|
||||
|
||||
Core::System& system;
|
||||
bool m_is_kernel{};
|
||||
bool m_enable_aslr{};
|
||||
bool m_enable_device_address_space_merge{};
|
||||
|
||||
KMemoryBlockSlabManager* m_memory_block_slab_manager{};
|
||||
KBlockInfoManager* m_block_info_manager{};
|
||||
KResourceLimit* m_resource_limit{};
|
||||
|
||||
u32 m_heap_fill_value{};
|
||||
u32 m_ipc_fill_value{};
|
||||
u32 m_stack_fill_value{};
|
||||
const KMemoryRegion* m_cached_physical_heap_region{};
|
||||
|
||||
KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
|
||||
KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
|
||||
|
||||
std::unique_ptr<Common::PageTable> m_page_table_impl;
|
||||
|
||||
Core::System& m_system;
|
||||
};
|
||||
|
||||
} // namespace Kernel
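One detail worth calling out from the header above: the containment checks compare addr + size - 1 against end - 1 rather than addr + size against end, which stays correct even when a region touches the very top of the address space. A standalone restatement of that overflow-safe test (types and names here are illustrative):

#include <cstdint>

constexpr bool RegionContains(uint64_t region_start, uint64_t region_end, uint64_t addr,
                              uint64_t size) {
    // addr < addr + size rejects zero sizes and wrapping ranges.
    return region_start <= addr && addr < addr + size &&
           addr + size - 1 <= region_end - 1;
}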
src/core/hle/kernel/k_page_table_manager.h (new file, 55 lines)
@@ -0,0 +1,55 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <atomic>

#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_resource_manager.h"
#include "core/hle/kernel/k_page_table_slab_heap.h"

namespace Kernel {

class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> {
public:
    using RefCount = KPageTableSlabHeap::RefCount;
    static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize;

public:
    KPageTableManager() = default;

    void Initialize(KDynamicPageManager* page_allocator, KPageTableSlabHeap* pt_heap) {
        m_pt_heap = pt_heap;

        static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>);
        BaseHeap::Initialize(page_allocator, pt_heap);
    }

    VAddr Allocate() {
        return VAddr(BaseHeap::Allocate());
    }

    RefCount GetRefCount(VAddr addr) const {
        return m_pt_heap->GetRefCount(addr);
    }

    void Open(VAddr addr, int count) {
        return m_pt_heap->Open(addr, count);
    }

    bool Close(VAddr addr, int count) {
        return m_pt_heap->Close(addr, count);
    }

    bool IsInPageTableHeap(VAddr addr) const {
        return m_pt_heap->IsInRange(addr);
    }

private:
    using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>;

    KPageTableSlabHeap* m_pt_heap{};
};

} // namespace Kernel
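A hedged usage sketch of the ref-counting surface above: a page-table page is allocated once, Open()ed per referencing entry, and recycled once Close() reports the count reached zero. The manager is assumed to be already initialized against its page allocator and slab heap:

void PageTableRefCountExample(Kernel::KPageTableManager& manager) {
    const auto table = manager.Allocate();
    manager.Open(table, 1);         // first reference
    manager.Open(table, 2);         // two more references
    if (manager.Close(table, 3)) {  // drops all three references
        // Refcount hit zero; the page may be returned to the allocator.
    }
}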

src/core/hle/kernel/k_page_table_slab_heap.h (new file, 93 lines)
@@ -0,0 +1,93 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <vector>

#include "common/common_types.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/slab_helpers.h"

namespace Kernel {

namespace impl {

class PageTablePage {
public:
    // Do not initialize anything.
    PageTablePage() = default;

private:
    std::array<u8, PageSize> m_buffer{};
};
static_assert(sizeof(PageTablePage) == PageSize);

} // namespace impl

class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> {
public:
    using RefCount = u16;
    static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
    static_assert(PageTableSize == PageSize);

public:
    KPageTableSlabHeap() = default;

    static constexpr size_t CalculateReferenceCountSize(size_t size) {
        return (size / PageSize) * sizeof(RefCount);
    }

    void Initialize(KDynamicPageManager* page_allocator, size_t object_count, RefCount* rc) {
        BaseHeap::Initialize(page_allocator, object_count);
        this->Initialize(rc);
    }

    RefCount GetRefCount(VAddr addr) {
        ASSERT(this->IsInRange(addr));
        return *this->GetRefCountPointer(addr);
    }

    void Open(VAddr addr, int count) {
        ASSERT(this->IsInRange(addr));

        *this->GetRefCountPointer(addr) += static_cast<RefCount>(count);

        ASSERT(this->GetRefCount(addr) > 0);
    }

    bool Close(VAddr addr, int count) {
        ASSERT(this->IsInRange(addr));
        ASSERT(this->GetRefCount(addr) >= count);

        *this->GetRefCountPointer(addr) -= static_cast<RefCount>(count);
        return this->GetRefCount(addr) == 0;
    }

    bool IsInPageTableHeap(VAddr addr) const {
        return this->IsInRange(addr);
    }

private:
    void Initialize([[maybe_unused]] RefCount* rc) {
        // TODO(bunnei): Use rc once we support kernel virtual memory allocations.
        const auto count = this->GetSize() / PageSize;
        m_ref_counts.resize(count);

        for (size_t i = 0; i < count; i++) {
            m_ref_counts[i] = 0;
        }
    }

    RefCount* GetRefCountPointer(VAddr addr) {
        return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize);
    }

private:
    using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>;

    std::vector<RefCount> m_ref_counts;
};

} // namespace Kernel
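A worked instance of CalculateReferenceCountSize above, assuming 4 KiB pages and the u16 RefCount declared in the class: a 2 MiB slab region needs (0x200000 / 0x1000) * sizeof(u16) = 1024 bytes of counter storage, one counter per page.

static_assert(Kernel::KPageTableSlabHeap::CalculateReferenceCountSize(0x200000) == 1024);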
@@ -57,12 +57,6 @@ Result KPort::EnqueueSession(KServerSession* session) {

    server.EnqueueSession(session);

    if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
        session_ptr->ClientConnected(server.AcceptSession());
    } else {
        ASSERT(false);
    }

    return ResultSuccess;
}

@@ -38,7 +38,7 @@ namespace {
|
||||
*/
|
||||
void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) {
|
||||
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
|
||||
ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
|
||||
ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));
|
||||
|
||||
KThread* thread = KThread::Create(system.Kernel());
|
||||
SCOPE_EXIT({ thread->Close(); });
|
||||
@@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
|
||||
|
||||
process->name = std::move(process_name);
|
||||
process->resource_limit = res_limit;
|
||||
process->status = ProcessStatus::Created;
|
||||
process->system_resource_address = 0;
|
||||
process->state = State::Created;
|
||||
process->program_id = 0;
|
||||
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
|
||||
: kernel.CreateNewUserProcessID();
|
||||
@@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
|
||||
process->exception_thread = nullptr;
|
||||
process->is_suspended = false;
|
||||
process->schedule_count = 0;
|
||||
process->is_handle_table_initialized = false;
|
||||
|
||||
// Open a reference to the resource limit.
|
||||
process->resource_limit->Open();
|
||||
|
||||
return ResultSuccess;
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KProcess::DoWorkerTaskImpl() {
|
||||
@@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() {
|
||||
}
|
||||
}
|
||||
|
||||
u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
|
||||
const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
|
||||
page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
|
||||
u64 KProcess::GetTotalPhysicalMemoryAvailable() {
|
||||
const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
|
||||
page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
|
||||
main_thread_stack_size};
|
||||
if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
|
||||
capacity != pool_size) {
|
||||
@@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
|
||||
return memory_usage_capacity;
|
||||
}
|
||||
|
||||
u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
|
||||
u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
|
||||
return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
|
||||
}
|
||||
|
||||
u64 KProcess::GetTotalPhysicalMemoryUsed() const {
|
||||
return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
|
||||
u64 KProcess::GetTotalPhysicalMemoryUsed() {
|
||||
return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
|
||||
GetSystemResourceSize();
|
||||
}
|
||||
|
||||
u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
|
||||
u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
|
||||
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
|
||||
}
|
||||
|
||||
@@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
|
||||
shmem->Open();
|
||||
shemen_info->Open();
|
||||
|
||||
return ResultSuccess;
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
|
||||
@@ -289,12 +291,12 @@ Result KProcess::Reset() {
|
||||
KScopedSchedulerLock sl{kernel};
|
||||
|
||||
// Validate that we're in a state that we can reset.
|
||||
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
|
||||
R_UNLESS(state != State::Terminated, ResultInvalidState);
|
||||
R_UNLESS(is_signaled, ResultInvalidState);
|
||||
|
||||
// Clear signaled.
|
||||
is_signaled = false;
|
||||
return ResultSuccess;
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KProcess::SetActivity(ProcessActivity activity) {
|
||||
@@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) {
|
||||
KScopedSchedulerLock sl{kernel};
|
||||
|
||||
// Validate our state.
|
||||
R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
|
||||
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
|
||||
R_UNLESS(state != State::Terminating, ResultInvalidState);
|
||||
R_UNLESS(state != State::Terminated, ResultInvalidState);
|
||||
|
||||
// Either pause or resume.
|
||||
if (activity == ProcessActivity::Paused) {
|
||||
// Verify that we're not suspended.
|
||||
if (is_suspended) {
|
||||
return ResultInvalidState;
|
||||
}
|
||||
R_UNLESS(!is_suspended, ResultInvalidState);
|
||||
|
||||
// Suspend all threads.
|
||||
for (auto* thread : GetThreadList()) {
|
||||
@@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
|
||||
ASSERT(activity == ProcessActivity::Runnable);
|
||||
|
||||
// Verify that we're suspended.
|
||||
if (!is_suspended) {
|
||||
return ResultInvalidState;
|
||||
}
|
||||
R_UNLESS(is_suspended, ResultInvalidState);
|
||||
|
||||
// Resume all threads.
|
||||
for (auto* thread : GetThreadList()) {
|
||||
@@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
|
||||
SetSuspended(false);
|
||||
}
|
||||
|
||||
return ResultSuccess;
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
|
||||
@@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
|
||||
system_resource_size = metadata.GetSystemResourceSize();
|
||||
image_size = code_size;
|
||||
|
||||
KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
|
||||
code_size + system_resource_size);
|
||||
// We currently do not support process-specific system resources.
|
||||
UNIMPLEMENTED_IF(system_resource_size != 0);
|
||||
|
||||
KScopedResourceReservation memory_reservation(
|
||||
resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size);
|
||||
if (!memory_reservation.Succeeded()) {
|
||||
LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
|
||||
code_size + system_resource_size);
|
||||
return ResultLimitReached;
|
||||
R_RETURN(ResultLimitReached);
|
||||
}
|
||||
// Initialize process address space
|
||||
if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
|
||||
0x8000000, code_size,
|
||||
KMemoryManager::Pool::Application)};
|
||||
if (const Result result{page_table.InitializeForProcess(
|
||||
metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
|
||||
0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)};
|
||||
result.IsError()) {
|
||||
return result;
|
||||
R_RETURN(result);
|
||||
}
|
||||
|
||||
// Map process code region
|
||||
if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
|
||||
code_size / PageSize, KMemoryState::Code,
|
||||
KMemoryPermission::None)};
|
||||
if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
|
||||
code_size / PageSize, KMemoryState::Code,
|
||||
KMemoryPermission::None)};
|
||||
result.IsError()) {
|
||||
return result;
|
||||
R_RETURN(result);
|
||||
}
|
||||
|
||||
// Initialize process capabilities
|
||||
const auto& caps{metadata.GetKernelCapabilities()};
|
||||
if (const Result result{
|
||||
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
|
||||
capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
|
||||
result.IsError()) {
|
||||
return result;
|
||||
R_RETURN(result);
|
||||
}
|
||||
|
||||
// Set memory usage capacity
|
||||
@@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
|
||||
case FileSys::ProgramAddressSpaceType::Is32Bit:
|
||||
case FileSys::ProgramAddressSpaceType::Is36Bit:
|
||||
case FileSys::ProgramAddressSpaceType::Is39Bit:
|
||||
memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
|
||||
memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
|
||||
break;
|
||||
|
||||
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
|
||||
memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
|
||||
page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
|
||||
memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
|
||||
page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
|
||||
break;
|
||||
|
||||
default:
|
||||
@@ -397,27 +398,27 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
|
||||
}
|
||||
|
||||
// Create TLS region
|
||||
R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
|
||||
R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
|
||||
memory_reservation.Commit();
|
||||
|
||||
return handle_table.Initialize(capabilities.GetHandleTableSize());
|
||||
R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
|
||||
}
|
||||
|
||||
void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
|
||||
AllocateMainThreadStack(stack_size);
|
||||
resource_limit->Reserve(LimitableResource::Threads, 1);
|
||||
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
|
||||
resource_limit->Reserve(LimitableResource::ThreadCountMax, 1);
|
||||
resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size);
|
||||
|
||||
const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
|
||||
ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
|
||||
ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
|
||||
|
||||
ChangeStatus(ProcessStatus::Running);
|
||||
ChangeState(State::Running);
|
||||
|
||||
SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
|
||||
}
|
||||
|
||||
void KProcess::PrepareForTermination() {
|
||||
ChangeStatus(ProcessStatus::Exiting);
|
||||
ChangeState(State::Terminating);
|
||||
|
||||
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
|
||||
for (auto* thread : in_thread_list) {
|
||||
@@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() {
|
||||
|
||||
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
|
||||
|
||||
this->DeleteThreadLocalRegion(tls_region_address);
|
||||
tls_region_address = 0;
|
||||
this->DeleteThreadLocalRegion(plr_address);
|
||||
plr_address = 0;
|
||||
|
||||
if (resource_limit) {
|
||||
resource_limit->Release(LimitableResource::PhysicalMemory,
|
||||
resource_limit->Release(LimitableResource::PhysicalMemoryMax,
|
||||
main_thread_stack_size + image_size);
|
||||
}
|
||||
|
||||
ChangeStatus(ProcessStatus::Exited);
|
||||
ChangeState(State::Terminated);
|
||||
}
|
||||
|
||||
void KProcess::Finalize() {
@@ -474,7 +475,7 @@ void KProcess::Finalize() {
     }

     // Finalize the page table.
-    page_table.reset();
+    page_table.Finalize();

     // Perform inherited finalization.
     KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
@@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
             }

             *out = tlr;
-            return ResultSuccess;
+            R_SUCCEED();
         }
     }

@@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     // We succeeded!
     tlp_guard.Cancel();
     *out = tlr;
-    return ResultSuccess;
+    R_SUCCEED();
 }

 Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
         KThreadLocalPage::Free(kernel, page_to_free);
     }

-    return ResultSuccess;
+    R_SUCCEED();
 }

 bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
     const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                       Svc::MemoryPermission permission) {
-        page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
+        page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
     };

     kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
@@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const {
 }

 KProcess::KProcess(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
-                                                        kernel_.System())},
+    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
       handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
       state_lock{kernel_}, list_lock{kernel_} {}

 KProcess::~KProcess() = default;

-void KProcess::ChangeStatus(ProcessStatus new_status) {
-    if (status == new_status) {
+void KProcess::ChangeState(State new_state) {
+    if (state == new_state) {
         return;
     }

-    status = new_status;
+    state = new_state;
     is_signaled = true;
     NotifyAvailable();
 }
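ChangeState is also the process' signal source: any transition sets is_signaled and calls NotifyAvailable(), waking threads synchronized on the process handle. A conceptual sketch of the observer side; the callback and the include path are assumed rather than taken from this commit:

    #include "core/hle/kernel/k_process.h" // assumed header location

    // Illustrative only: code woken by a signaled KProcess can branch on the
    // State values introduced in this diff.
    void OnProcessSignaled(Kernel::KProcess& process) {
        switch (process.GetState()) {
        case Kernel::KProcess::State::Terminating:
            // Threads are being stopped; stop submitting work.
            break;
        case Kernel::KProcess::State::Terminated:
            // Resources have been released; safe to drop references.
            break;
        default:
            break;
        }
    }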
@@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
     // The kernel always ensures that the given stack size is page aligned.
     main_thread_stack_size = Common::AlignUp(stack_size, PageSize);

-    const VAddr start{page_table->GetStackRegionStart()};
-    const std::size_t size{page_table->GetStackRegionEnd() - start};
+    const VAddr start{page_table.GetStackRegionStart()};
+    const std::size_t size{page_table.GetStackRegionEnd() - start};

     CASCADE_RESULT(main_thread_stack_top,
-                   page_table->AllocateAndMapMemory(
+                   page_table.AllocateAndMapMemory(
                        main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
                        KMemoryState::Stack, KMemoryPermission::UserReadWrite));

     main_thread_stack_top += main_thread_stack_size;

-    return ResultSuccess;
+    R_SUCCEED();
 }

 } // namespace Kernel
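Several hunks above replace "return ResultSuccess;" with R_SUCCEED() and wrap returned Result expressions in R_RETURN(...). The macros are defined elsewhere in the tree; a plausible minimal expansion, stated as an assumption rather than the project's actual definition:

    // Assumed expansions. Centralizing returns behind macros lets logging or
    // on-exit hooks be added in one place later, the usual motivation for
    // this style of migration.
    #define R_RETURN(expr)                                                     \
        do {                                                                   \
            const Result _macro_result = (expr);                               \
            return _macro_result;                                              \
        } while (false)

    #define R_SUCCEED() R_RETURN(ResultSuccess)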
@@ -13,6 +13,7 @@
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_handle_table.h"
+#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_worker_task.h"
@@ -31,7 +32,6 @@ class ProgramMetadata;
 namespace Kernel {

 class KernelCore;
-class KPageTable;
 class KResourceLimit;
 class KThread;
 class KSharedMemoryInfo;
@@ -45,24 +45,6 @@ enum class MemoryRegion : u16 {
     BASE = 3,
 };

-/**
- * Indicates the status of a Process instance.
- *
- * @note These match the values as used by kernel,
- * so new entries should only be added if RE
- * shows that a new value has been introduced.
- */
-enum class ProcessStatus {
-    Created,
-    CreatedWithDebuggerAttached,
-    Running,
-    WaitingForDebuggerToAttach,
-    DebuggerAttached,
-    Exiting,
-    Exited,
-    DebugBreak,
-};

 enum class ProcessActivity : u32 {
     Runnable,
     Paused,
@@ -89,6 +71,17 @@ public:
     explicit KProcess(KernelCore& kernel_);
     ~KProcess() override;

+    enum class State {
+        Created = static_cast<u32>(Svc::ProcessState::Created),
+        CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
+        Running = static_cast<u32>(Svc::ProcessState::Running),
+        Crashed = static_cast<u32>(Svc::ProcessState::Crashed),
+        RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached),
+        Terminating = static_cast<u32>(Svc::ProcessState::Terminating),
+        Terminated = static_cast<u32>(Svc::ProcessState::Terminated),
+        DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
+    };
+
     enum : u64 {
         /// Lowest allowed process ID for a kernel initial process.
         InitialKIPIDMin = 1,
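Each State enumerator is pinned to the matching Svc::ProcessState value, so crossing the SVC boundary needs only a cast, never a translation table. A hedged illustration; the assert and the helper below are mine, not part of the commit:

    // Because the values coincide by construction, conversion is a cast.
    static_assert(static_cast<u32>(Kernel::KProcess::State::Terminated) ==
                  static_cast<u32>(Kernel::Svc::ProcessState::Terminated));

    inline Kernel::Svc::ProcessState ToSvcProcessState(Kernel::KProcess::State state) {
        return static_cast<Kernel::Svc::ProcessState>(state); // hypothetical helper
    }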
@@ -114,12 +107,12 @@ public:

     /// Gets a reference to the process' page table.
     KPageTable& PageTable() {
-        return *page_table;
+        return page_table;
     }

     /// Gets a const reference to the process' page table.
     const KPageTable& PageTable() const {
-        return *page_table;
+        return page_table;
     }

     /// Gets a reference to the process' handle table.
@@ -145,26 +138,25 @@ public:
     }

     Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
-        return condition_var.Wait(address, cv_key, tag, ns);
+        R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
     }

     Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
-        return address_arbiter.SignalToAddress(address, signal_type, value, count);
+        R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
     }

     Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
                               s64 timeout) {
-        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
+        R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
     }

     /// Gets the address to the process' dedicated TLS region.
-    VAddr GetTLSRegionAddress() const {
-        return tls_region_address;
+    VAddr GetProcessLocalRegionAddress() const {
+        return plr_address;
     }

     /// Gets the current status of the process
-    ProcessStatus GetStatus() const {
-        return status;
+    State GetState() const {
+        return state;
     }

     /// Gets the unique ID that identifies this particular process.
@@ -286,18 +278,18 @@ public:
     }

     /// Retrieves the total physical memory available to this process in bytes.
-    u64 GetTotalPhysicalMemoryAvailable() const;
+    u64 GetTotalPhysicalMemoryAvailable();

     /// Retrieves the total physical memory available to this process in bytes,
     /// without the size of the personal system resource heap added to it.
-    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;
+    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();

     /// Retrieves the total physical memory used by this process in bytes.
-    u64 GetTotalPhysicalMemoryUsed() const;
+    u64 GetTotalPhysicalMemoryUsed();

     /// Retrieves the total physical memory used by this process in bytes,
     /// without the size of the personal system resource heap added to it.
-    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
+    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();

     /// Gets the list of all threads created with this process as their owner.
     std::list<KThread*>& GetThreadList() {
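The four memory getters above lose their const qualifier. The excerpt does not show why, but a common cause in such refactors is that the getters now go through a non-const code path, for example one that takes a lock. A generic C++ illustration of that constraint, not yuzu code:

    #include <cstdint>
    #include <mutex>

    // A getter that locks a plain mutex member cannot be const, because
    // locking mutates the mutex.
    class Example {
    public:
        std::uint64_t GetTotalPhysicalMemoryUsed() { // non-const by necessity
            std::scoped_lock lk{lock};
            return usage;
        }

    private:
        std::mutex lock; // declaring this `mutable` would restore const-ness
        std::uint64_t usage{};
    };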
@@ -415,19 +407,24 @@ private:
         pinned_threads[core_id] = nullptr;
     }

-    /// Changes the process status. If the status is different
-    /// from the current process status, then this will trigger
-    /// a process signal.
-    void ChangeStatus(ProcessStatus new_status);
+    void FinalizeHandleTable() {
+        // Finalize the table.
+        handle_table.Finalize();
+
+        // Note that the table is finalized.
+        is_handle_table_initialized = false;
+    }
+
+    void ChangeState(State new_state);

     /// Allocates the main thread stack for the process, given the stack size in bytes.
     Result AllocateMainThreadStack(std::size_t stack_size);

     /// Memory manager for this process
-    std::unique_ptr<KPageTable> page_table;
+    KPageTable page_table;

     /// Current status of the process
-    ProcessStatus status{};
+    State state{};

     /// The ID of this process
     u64 process_id = 0;
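The page table becomes a plain value member instead of a unique_ptr, which is why every page_table-> in the .cpp hunks turns into page_table., the accessors stop dereferencing, and the header now includes k_page_table.h instead of forward-declaring KPageTable. A self-contained sketch of the trade-off, with a stub standing in for the real class:

    #include <memory>

    class KPageTableStub {}; // stand-in so the sketch compiles on its own

    // Before: heap allocation and pointer indirection on each access, but a
    // forward declaration suffices in the header.
    struct ProcessBefore {
        std::unique_ptr<KPageTableStub> page_table = std::make_unique<KPageTableStub>();
        KPageTableStub& PageTable() { return *page_table; }
    };

    // After: the table lives inline, dropping an allocation and the
    // indirection, at the cost of pulling the full definition into the header.
    struct ProcessAfter {
        KPageTableStub page_table;
        KPageTableStub& PageTable() { return page_table; }
    };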
@@ -443,6 +440,8 @@ private:
     /// Resource limit descriptor for this process
     KResourceLimit* resource_limit{};

+    VAddr system_resource_address{};
+
     /// The ideal CPU core for this process, threads are scheduled on this core by default.
     u8 ideal_core = 0;
@@ -469,7 +468,7 @@ private:
     KConditionVariable condition_var;

     /// Address indicating the location of the process' dedicated TLS region.
-    VAddr tls_region_address = 0;
+    VAddr plr_address = 0;

     /// Random values for svcGetInfo RandomEntropy
     std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@@ -495,8 +494,12 @@ private:
     /// Schedule count of this process
     s64 schedule_count{};

+    size_t memory_release_hint{};
+
     bool is_signaled{};
     bool is_suspended{};
     bool is_immortal{};
+    bool is_handle_table_initialized{};
+    bool is_initialized{};

     std::atomic<u16> num_running_threads{};
@@ -159,12 +159,13 @@ KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical
     // TODO(bunnei): These values are the system defaults, the limits for service processes are
     // lower. These should use the correct limit values.

-    ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, physical_memory_size)
+    ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemoryMax, physical_memory_size)
               .IsSuccess());
-    ASSERT(resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess());
-    ASSERT(resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess());
-    ASSERT(resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200).IsSuccess());
-    ASSERT(resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess());
+    ASSERT(resource_limit->SetLimitValue(LimitableResource::ThreadCountMax, 800).IsSuccess());
+    ASSERT(resource_limit->SetLimitValue(LimitableResource::EventCountMax, 900).IsSuccess());
+    ASSERT(
+        resource_limit->SetLimitValue(LimitableResource::TransferMemoryCountMax, 200).IsSuccess());
+    ASSERT(resource_limit->SetLimitValue(LimitableResource::SessionCountMax, 1133).IsSuccess());

     return resource_limit;
 }
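These defaults are what later Reserve calls are checked against. A hedged usage sketch, assuming Reserve reports success as a bool (its use as a guard elsewhere in the kernel suggests this, but the signature is not shown in this diff):

    // Illustrative: with the thread limit seeded to 800 above, the 801st
    // outstanding reservation is the one that fails.
    bool TryReserveOneThread(Kernel::KResourceLimit* limit) {
        return limit->Reserve(Kernel::LimitableResource::ThreadCountMax, 1);
    }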
@@ -16,15 +16,8 @@ class CoreTiming;

 namespace Kernel {
 class KernelCore;
-enum class LimitableResource : u32 {
-    PhysicalMemory = 0,
-    Threads = 1,
-    Events = 2,
-    TransferMemory = 3,
-    Sessions = 4,
-
-    Count,
-};
+using LimitableResource = Svc::LimitableResource;

 constexpr bool IsValidResourceType(LimitableResource type) {
     return type < LimitableResource::Count;
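With the local enum gone, the kernel shares one definition with the SVC layer and the two can no longer drift apart. Judging by the renamed enumerators used throughout this diff, the Svc definition presumably looks like the following; the layout is inferred, so consult the real Svc types header for the authoritative version:

    #include <cstdint>

    namespace Svc {
    // Inferred shape: the names come from this diff, the values are assumed
    // to keep the old ordering. yuzu spells std::uint32_t as u32.
    enum class LimitableResource : std::uint32_t {
        PhysicalMemoryMax = 0,
        ThreadCountMax = 1,
        EventCountMax = 2,
        TransferMemoryCountMax = 3,
        SessionCountMax = 4,

        Count,
    };
    } // namespace Svc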
Some files were not shown because too many files have changed in this diff.