Compare commits
307 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 17dc8292db | |
| | 0a5bc8ec14 | |
| | … 303 further commits … | |
| | fb54c38631 | |
| | 21b40de318 | |
18 .ci/scripts/clang/docker.sh Executable file
@@ -0,0 +1,18 @@
#!/bin/bash -ex

# Exit on error, rather than continuing with the rest of the script.
set -e

cd /yuzu

ccache -s

mkdir build || true && cd build
cmake .. -DDISPLAY_VERSION=$1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/clang -DCMAKE_CXX_COMPILER=/usr/lib/ccache/clang++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DCMAKE_INSTALL_PREFIX="/usr"

make -j$(nproc)

ccache -s

ctest -VV -C Release
8 .ci/scripts/clang/exec.sh Normal file
@@ -0,0 +1,8 @@
#!/bin/bash -ex

mkdir -p "ccache" || true
chmod a+x ./.ci/scripts/clang/docker.sh
# the UID for the container yuzu user is 1027
sudo chown -R 1027 ./
docker run -e ENABLE_COMPATIBILITY_REPORTING -e CCACHE_DIR=/yuzu/ccache -v $(pwd):/yuzu yuzuemu/build-environments:linux-fresh /bin/bash /yuzu/.ci/scripts/clang/docker.sh $1
sudo chown -R $UID ./
20 .ci/scripts/clang/upload.sh Normal file
@@ -0,0 +1,20 @@
#!/bin/bash -ex

. .ci/scripts/common/pre-upload.sh

REV_NAME="yuzu-linux-${GITDATE}-${GITREV}"
ARCHIVE_NAME="${REV_NAME}.tar.xz"
COMPRESSION_FLAGS="-cJvf"

if [ "${RELEASE_NAME}" = "mainline" ]; then
    DIR_NAME="${REV_NAME}"
else
    DIR_NAME="${REV_NAME}_${RELEASE_NAME}"
fi

mkdir "$DIR_NAME"

cp build/bin/yuzu-cmd "$DIR_NAME"
cp build/bin/yuzu "$DIR_NAME"

. .ci/scripts/common/post-upload.sh
@@ -8,7 +8,7 @@ steps:
  displayName: 'Install vulkan-sdk'
- script: python -m pip install --upgrade pip conan
  displayName: 'Install conan'
- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 --config Release -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} .. && cd ..
- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} .. && cmake --install . --config Release && cd ..
  displayName: 'Configure CMake'
- task: MSBuild@1
  displayName: 'Build'

@@ -12,6 +12,9 @@ jobs:
    windows:
      BuildSuffix: 'windows-mingw'
      ScriptFolder: 'windows'
    clang:
      BuildSuffix: 'clang'
      ScriptFolder: 'clang'
    linux:
      BuildSuffix: 'linux'
      ScriptFolder: 'linux'
@@ -24,4 +27,4 @@ jobs:
  parameters:
    artifactSource: 'false'
    cache: $(parameters.cache)
    version: $(parameters.version)
    version: $(parameters.version)
2 .gitmodules vendored
@@ -27,7 +27,7 @@
url = https://github.com/ReinUsesLisp/sirit
[submodule "mbedtls"]
path = externals/mbedtls
url = https://github.com/DarkLordZach/mbedtls
url = https://github.com/yuzu-emu/mbedtls
[submodule "libzip"]
path = externals/libzip/libzip
url = https://github.com/nih-at/libzip.git
@@ -172,6 +172,8 @@ macro(yuzu_find_packages)
    "nlohmann_json 3.8 nlohmann_json/3.8.0"
    "ZLIB 1.2 zlib/1.2.11"
    "zstd 1.4 zstd/1.4.8"
    # can't use opus until AVX check is fixed: https://github.com/yuzu-emu/yuzu/pull/4068
    #"opus 1.3 opus/1.3.1"
)

foreach(PACKAGE ${REQUIRED_LIBS})
10 externals/CMakeLists.txt vendored
@@ -64,8 +64,8 @@ endif()
add_subdirectory(sirit)

# libzip
find_package(Libzip 1.5)
if (NOT LIBZIP_FOUND)
find_package(libzip 1.5)
if (NOT libzip_FOUND)
    message(STATUS "libzip 1.5 or newer not found, falling back to externals")
    add_subdirectory(libzip EXCLUDE_FROM_ALL)
endif()
@@ -97,4 +97,8 @@ if (ENABLE_WEB_SERVICE)
endif()

# Opus
add_subdirectory(opus)
find_package(opus 1.3)
if (NOT opus_FOUND)
    message(STATUS "opus 1.3 or newer not found, falling back to externals")
    add_subdirectory(opus EXCLUDE_FROM_ALL)
endif()

2 externals/dynarmic vendored
Submodule externals/dynarmic updated: cafa687684...b2a4da5e65
72 externals/find-modules/FindLibzip.cmake vendored
@@ -1,72 +0,0 @@

find_package(PkgConfig QUIET)
pkg_check_modules(PC_LIBZIP QUIET libzip)

find_path(LIBZIP_INCLUDE_DIR
    NAMES zip.h
    PATHS ${PC_LIBZIP_INCLUDE_DIRS}
    "$ENV{LIB_DIR}/include"
    "$ENV{INCLUDE}"
    /usr/local/include
    /usr/include
)
find_path(LIBZIP_INCLUDE_DIR_ZIPCONF
    NAMES zipconf.h
    HINTS ${PC_LIBZIP_INCLUDE_DIRS}
    "$ENV{LIB_DIR}/include"
    "$ENV{LIB_DIR}/lib/libzip/include"
    "$ENV{LIB}/lib/libzip/include"
    /usr/local/lib/libzip/include
    /usr/lib/libzip/include
    /usr/local/include
    /usr/include
    "$ENV{INCLUDE}"
)
find_library(LIBZIP_LIBRARY
    NAMES zip
    PATHS ${PC_LIBZIP_LIBRARY_DIRS}
    "$ENV{LIB_DIR}/lib" "$ENV{LIB}" /usr/local/lib /usr/lib
)

if (LIBZIP_INCLUDE_DIR_ZIPCONF)
    FILE(READ "${LIBZIP_INCLUDE_DIR_ZIPCONF}/zipconf.h" _LIBZIP_VERSION_CONTENTS)
    if (_LIBZIP_VERSION_CONTENTS)
        STRING(REGEX REPLACE ".*#define LIBZIP_VERSION \"([0-9.]+)\".*" "\\1" LIBZIP_VERSION "${_LIBZIP_VERSION_CONTENTS}")
    endif()
    unset(_LIBZIP_VERSION_CONTENTS)
endif()

set(LIBZIP_VERSION ${LIBZIP_VERSION} CACHE STRING "Version number of libzip")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Libzip
    FOUND_VAR LIBZIP_FOUND
    REQUIRED_VARS
        LIBZIP_LIBRARY
        LIBZIP_INCLUDE_DIR
        LIBZIP_INCLUDE_DIR_ZIPCONF
        LIBZIP_VERSION
    VERSION_VAR LIBZIP_VERSION
)

if(LIBZIP_FOUND)
    set(LIBZIP_LIBRARIES ${LIBZIP_LIBRARY})
    set(LIBZIP_INCLUDE_DIRS ${LIBZIP_INCLUDE_DIR})
    set(LIBZIP_DEFINITIONS ${PC_LIBZIP_CFLAGS_OTHER})
endif()

if(LIBZIP_FOUND AND NOT TARGET libzip::libzip)
    add_library(libzip::libzip UNKNOWN IMPORTED)
    set_target_properties(libzip::libzip PROPERTIES
        IMPORTED_LOCATION "${LIBZIP_LIBRARY}"
        INTERFACE_COMPILE_OPTIONS "${PC_LIBZIP_CFLAGS_OTHER}"
        INTERFACE_INCLUDE_DIRECTORIES "${LIBZIP_INCLUDE_DIR}"
    )
endif()

mark_as_advanced(
    LIBZIP_INCLUDE_DIR
    LIBZIP_INCLUDE_DIR_ZIPCONF
    LIBZIP_LIBRARY
    LIBZIP_VERSION
)
72 externals/find-modules/Findlibzip.cmake vendored Normal file
@@ -0,0 +1,72 @@

find_package(PkgConfig QUIET)
pkg_check_modules(PC_libzip QUIET libzip)

find_path(libzip_INCLUDE_DIR
    NAMES zip.h
    PATHS ${PC_libzip_INCLUDE_DIRS}
    "$ENV{LIB_DIR}/include"
    "$ENV{INCLUDE}"
    /usr/local/include
    /usr/include
)
find_path(libzip_INCLUDE_DIR_ZIPCONF
    NAMES zipconf.h
    HINTS ${PC_libzip_INCLUDE_DIRS}
    "$ENV{LIB_DIR}/include"
    "$ENV{LIB_DIR}/lib/libzip/include"
    "$ENV{LIB}/lib/libzip/include"
    /usr/local/lib/libzip/include
    /usr/lib/libzip/include
    /usr/local/include
    /usr/include
    "$ENV{INCLUDE}"
)
find_library(libzip_LIBRARY
    NAMES zip
    PATHS ${PC_libzip_LIBRARY_DIRS}
    "$ENV{LIB_DIR}/lib" "$ENV{LIB}" /usr/local/lib /usr/lib
)

if (libzip_INCLUDE_DIR_ZIPCONF)
    FILE(READ "${libzip_INCLUDE_DIR_ZIPCONF}/zipconf.h" _libzip_VERSION_CONTENTS)
    if (_libzip_VERSION_CONTENTS)
        STRING(REGEX REPLACE ".*#define LIBZIP_VERSION \"([0-9.]+)\".*" "\\1" libzip_VERSION "${_libzip_VERSION_CONTENTS}")
    endif()
    unset(_libzip_VERSION_CONTENTS)
endif()

set(libzip_VERSION ${libzip_VERSION} CACHE STRING "Version number of libzip")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(libzip
    FOUND_VAR libzip_FOUND
    REQUIRED_VARS
        libzip_LIBRARY
        libzip_INCLUDE_DIR
        libzip_INCLUDE_DIR_ZIPCONF
        libzip_VERSION
    VERSION_VAR libzip_VERSION
)

if(libzip_FOUND)
    set(libzip_LIBRARIES ${libzip_LIBRARY})
    set(libzip_INCLUDE_DIRS ${libzip_INCLUDE_DIR})
    set(libzip_DEFINITIONS ${PC_libzip_CFLAGS_OTHER})
endif()

if(libzip_FOUND AND NOT TARGET libzip::libzip)
    add_library(libzip::libzip UNKNOWN IMPORTED)
    set_target_properties(libzip::libzip PROPERTIES
        IMPORTED_LOCATION "${libzip_LIBRARY}"
        INTERFACE_COMPILE_OPTIONS "${PC_libzip_CFLAGS_OTHER}"
        INTERFACE_INCLUDE_DIRECTORIES "${libzip_INCLUDE_DIR}"
    )
endif()

mark_as_advanced(
    libzip_INCLUDE_DIR
    libzip_INCLUDE_DIR_ZIPCONF
    libzip_LIBRARY
    libzip_VERSION
)
2 externals/find-modules/Findopus.cmake vendored
@@ -28,7 +28,7 @@ if(opus_FOUND)
endif()

if(opus_FOUND AND NOT TARGET Opus::Opus)
    add_library(Opus::Opus UNKNOWN IMPORTED)
    add_library(Opus::Opus UNKNOWN IMPORTED GLOBAL)
    set_target_properties(Opus::Opus PROPERTIES
        IMPORTED_LOCATION "${opus_LIBRARY}"
        INTERFACE_COMPILE_OPTIONS "${PC_opus_CFLAGS_OTHER}"
3 externals/glad/include/glad/glad.h vendored
@@ -5156,6 +5156,9 @@ GLAPI PFNGLDEPTHRANGEARRAYVPROC glad_glDepthRangeArrayv;
typedef void (APIENTRYP PFNGLDEPTHRANGEINDEXEDPROC)(GLuint index, GLdouble n, GLdouble f);
GLAPI PFNGLDEPTHRANGEINDEXEDPROC glad_glDepthRangeIndexed;
#define glDepthRangeIndexed glad_glDepthRangeIndexed
typedef void (APIENTRYP PFNGLDEPTHRANGEINDEXEDDNVPROC)(GLuint index, GLdouble n, GLdouble f);
GLAPI PFNGLDEPTHRANGEINDEXEDDNVPROC glad_glDepthRangeIndexeddNV;
#define glDepthRangeIndexeddNV glad_glDepthRangeIndexeddNV
typedef void (APIENTRYP PFNGLGETFLOATI_VPROC)(GLenum target, GLuint index, GLfloat *data);
GLAPI PFNGLGETFLOATI_VPROC glad_glGetFloati_v;
#define glGetFloati_v glad_glGetFloati_v
2 externals/glad/src/glad.c vendored
@@ -1044,6 +1044,7 @@ PFNGLDEPTHMASKPROC glad_glDepthMask = NULL;
PFNGLDEPTHRANGEPROC glad_glDepthRange = NULL;
PFNGLDEPTHRANGEARRAYVPROC glad_glDepthRangeArrayv = NULL;
PFNGLDEPTHRANGEINDEXEDPROC glad_glDepthRangeIndexed = NULL;
PFNGLDEPTHRANGEINDEXEDDNVPROC glad_glDepthRangeIndexeddNV = NULL;
PFNGLDEPTHRANGEFPROC glad_glDepthRangef = NULL;
PFNGLDETACHSHADERPROC glad_glDetachShader = NULL;
PFNGLDISABLEPROC glad_glDisable = NULL;
@@ -7971,6 +7972,7 @@ static void load_GL_NV_depth_buffer_float(GLADloadproc load) {
    glad_glDepthRangedNV = (PFNGLDEPTHRANGEDNVPROC)load("glDepthRangedNV");
    glad_glClearDepthdNV = (PFNGLCLEARDEPTHDNVPROC)load("glClearDepthdNV");
    glad_glDepthBoundsdNV = (PFNGLDEPTHBOUNDSDNVPROC)load("glDepthBoundsdNV");
    glad_glDepthRangeIndexeddNV = (PFNGLDEPTHRANGEINDEXEDDNVPROC)load("glDepthRangeIndexeddNV");
}
static void load_GL_NV_draw_texture(GLADloadproc load) {
    if(!GLAD_GL_NV_draw_texture) return;
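The two glad hunks above wire up the `GL_NV_depth_buffer_float` entry point `glDepthRangeIndexeddNV` next to the core `glDepthRangeIndexed`. A minimal, hedged usage sketch follows; it assumes a current OpenGL context and a prior successful `gladLoadGL()` call, and the helper name is illustrative rather than taken from this change.

```cpp
#include <glad/glad.h>

// Illustrative helper, not part of this diff. GLAD_GL_NV_depth_buffer_float
// follows the GLAD_GL_<extension> flag convention visible in the loader above.
void SetDepthRangeForViewport(GLuint viewport_index, GLdouble depth_near, GLdouble depth_far) {
    if (GLAD_GL_NV_depth_buffer_float) {
        // NV variant: accepts values outside [0, 1] when the extension is present.
        glDepthRangeIndexeddNV(viewport_index, depth_near, depth_far);
    } else {
        // Core GL 4.1 fallback with the same (index, n, f) signature.
        glDepthRangeIndexed(viewport_index, depth_near, depth_far);
    }
}
```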
5 externals/libusb/CMakeLists.txt vendored
@@ -1,3 +1,8 @@
# Ensure libusb compiles with UTF-8 encoding on MSVC
if(MSVC)
    add_compile_options(/utf-8)
endif()

add_library(usb STATIC EXCLUDE_FROM_ALL
    libusb/libusb/core.c

2 externals/mbedtls vendored
Submodule externals/mbedtls updated: a280e602f3...eac2416b8f
2 externals/opus/CMakeLists.txt vendored
@@ -252,3 +252,5 @@ PRIVATE
    opus/silk/float
    opus/src
)

add_library(Opus::Opus ALIAS opus)
@@ -27,6 +27,7 @@ if (MSVC)
    # /Zo - Enhanced debug info for optimized builds
    # /permissive- - Enables stricter C++ standards conformance checks
    # /EHsc - C++-only exception handling semantics
    # /utf-8 - Set source and execution character sets to UTF-8
    # /volatile:iso - Use strict standards-compliant volatile semantics.
    # /Zc:externConstexpr - Allow extern constexpr variables to have external linkage, like the standard mandates
    # /Zc:inline - Let codegen omit inline functions in object files
@@ -38,6 +39,7 @@ if (MSVC)
    /permissive-
    /EHsc
    /std:c++latest
    /utf-8
    /volatile:iso
    /Zc:externConstexpr
    /Zc:inline
@@ -7,7 +7,7 @@
#include "audio_core/sink_details.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/settings.h"
#include "common/settings.h"

namespace AudioCore {

@@ -11,8 +11,8 @@
#include "audio_core/info_updater.h"
#include "audio_core/voice_context.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/memory.h"
#include "core/settings.h"

namespace {
[[nodiscard]] static constexpr s16 ClampToS16(s32 value) {

@@ -11,7 +11,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/ring_buffer.h"
#include "core/settings.h"
#include "common/settings.h"

#ifdef _WIN32
#include <objbase.h>

@@ -11,8 +11,8 @@
#include "audio_core/stream.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/core_timing.h"
#include "core/settings.h"

namespace AudioCore {
@@ -97,6 +97,7 @@ add_custom_command(OUTPUT scm_rev.cpp
add_library(common STATIC
    algorithm.h
    alignment.h
    assert.cpp
    assert.h
    atomic_ops.h
    detached_tasks.cpp
@@ -109,6 +110,7 @@ add_library(common STATIC
    cityhash.h
    common_funcs.h
    common_paths.h
    common_sizes.h
    common_types.h
    concepts.h
    div_ceil.h
@@ -150,6 +152,10 @@ add_library(common STATIC
    scm_rev.cpp
    scm_rev.h
    scope_exit.h
    settings.cpp
    settings.h
    settings_input.cpp
    settings_input.h
    spin_lock.cpp
    spin_lock.h
    stream.cpp
14 src/common/assert.cpp Normal file
@@ -0,0 +1,14 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/common_funcs.h"

#include "common/settings.h"

void assert_handle_failure() {
    if (Settings::values.use_debug_asserts) {
        Crash();
    }
}
@@ -4,10 +4,13 @@

#pragma once

#include <cstdlib>
#include "common/common_funcs.h"
#include "common/logging/log.h"

// Sometimes we want to try to continue even after hitting an assert.
// However touching this file yields a global recompilation as this header is included almost
// everywhere. So let's just move the handling of the failed assert to a single cpp file.
void assert_handle_failure();

// For asserts we'd like to keep all the junk executed when an assert happens away from the
// important code in the function. One way of doing this is to put all the relevant code inside a
// lambda and force the compiler to not inline it. Unfortunately, MSVC seems to have no syntax to
@@ -17,15 +20,14 @@
// enough for our purposes.
template <typename Fn>
#if defined(_MSC_VER)
[[msvc::noinline, noreturn]]
[[msvc::noinline]]
#elif defined(__GNUC__)
[[gnu::cold, gnu::noinline, noreturn]]
[[gnu::cold, gnu::noinline]]
#endif
static void
assert_noinline_call(const Fn& fn) {
    fn();
    Crash();
    exit(1); // Keeps GCC's mouth shut about this actually returning
    assert_handle_failure();
}

#define ASSERT(_a_) \
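The rewritten assert path above moves the crash decision into `assert_handle_failure()` and drops `[[noreturn]]`, so a failed assert can now fall through when `Settings::values.use_debug_asserts` is false. A self-contained sketch of the same out-of-line idiom, with stand-in names (the real `ASSERT` macro additionally logs through `LOG_CRITICAL`):

```cpp
#include <cstdio>
#include <cstdlib>

// Stand-in for Settings::values.use_debug_asserts.
static bool g_use_debug_asserts = false;

// Stand-in for assert_handle_failure(): crash only when debug asserts are enabled.
static void handle_failed_assert() {
    if (g_use_debug_asserts) {
        std::abort();
    }
}

// Same shape as assert_noinline_call above: keep the failure path out of the
// hot function, then let execution continue afterwards.
template <typename Fn>
#if defined(_MSC_VER)
[[msvc::noinline]]
#elif defined(__GNUC__)
[[gnu::cold, gnu::noinline]]
#endif
static void report_failed_assert(const Fn& fn) {
    fn();
    handle_failed_assert();
}

int main() {
    const int value = 3;
    if (value != 4) { // The condition the real ASSERT macro would test.
        report_failed_assert([] { std::puts("Assertion Failed!: value == 4"); });
    }
    std::puts("execution continues"); // Reached because debug asserts are off.
    return 0;
}
```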
@@ -61,6 +61,7 @@

#pragma once

#include <cstddef>
#include "common/common_types.h"

namespace Common {
@@ -52,9 +52,13 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
// Generic function to get last error message.
// Call directly after the command or use the error num.
// This function might change the error code.
// Defined in Misc.cpp.
// Defined in misc.cpp.
[[nodiscard]] std::string GetLastErrorMsg();

// Like GetLastErrorMsg(), but passing an explicit error code.
// Defined in misc.cpp.
[[nodiscard]] std::string NativeErrorToString(int e);

#define DECLARE_ENUM_FLAG_OPERATORS(type) \
    [[nodiscard]] constexpr type operator|(type a, type b) noexcept { \
        using T = std::underlying_type_t<type>; \
43 src/common/common_sizes.h Normal file
@@ -0,0 +1,43 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <limits>

#include "common/common_types.h"

namespace Common {

enum : u64 {
    Size_1_KB = 0x400ULL,
    Size_64_KB = 64ULL * Size_1_KB,
    Size_128_KB = 128ULL * Size_1_KB,
    Size_1_MB = 0x100000ULL,
    Size_2_MB = 2ULL * Size_1_MB,
    Size_4_MB = 4ULL * Size_1_MB,
    Size_5_MB = 5ULL * Size_1_MB,
    Size_14_MB = 14ULL * Size_1_MB,
    Size_32_MB = 32ULL * Size_1_MB,
    Size_33_MB = 33ULL * Size_1_MB,
    Size_128_MB = 128ULL * Size_1_MB,
    Size_448_MB = 448ULL * Size_1_MB,
    Size_507_MB = 507ULL * Size_1_MB,
    Size_562_MB = 562ULL * Size_1_MB,
    Size_1554_MB = 1554ULL * Size_1_MB,
    Size_2048_MB = 2048ULL * Size_1_MB,
    Size_2193_MB = 2193ULL * Size_1_MB,
    Size_3285_MB = 3285ULL * Size_1_MB,
    Size_4916_MB = 4916ULL * Size_1_MB,
    Size_1_GB = 0x40000000ULL,
    Size_2_GB = 2ULL * Size_1_GB,
    Size_4_GB = 4ULL * Size_1_GB,
    Size_6_GB = 6ULL * Size_1_GB,
    Size_8_GB = 8ULL * Size_1_GB,
    Size_64_GB = 64ULL * Size_1_GB,
    Size_512_GB = 512ULL * Size_1_GB,
    Size_Invalid = std::numeric_limits<u64>::max(),
};

} // namespace Common
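The new common_sizes.h centralizes these memory-size constants as plain `u64` enumerators, so they compose with ordinary arithmetic. A small usage sketch (the pool names are illustrative, not taken from this diff):

```cpp
#include <cstdio>

#include "common/common_sizes.h"

int main() {
    // Illustrative pool sizes built from the new constants.
    constexpr u64 application_pool = Common::Size_3285_MB;
    constexpr u64 applet_pool = Common::Size_507_MB;
    constexpr u64 total = application_pool + applet_pool;

    std::printf("total pool size: %llu MiB\n",
                static_cast<unsigned long long>(total / Common::Size_1_MB));
    return 0;
}
```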
@@ -11,7 +11,7 @@

namespace Common {

constexpr std::size_t default_stack_size = 256 * 1024;
constexpr std::size_t default_stack_size = 512 * 1024;

struct Fiber::FiberImpl {
    FiberImpl() : stack{default_stack_size}, rewind_stack{default_stack_size} {}
@@ -116,16 +116,19 @@ void Fiber::Rewind() {
    boost::context::detail::jump_fcontext(impl->rewind_context, this);
}

void Fiber::YieldTo(std::shared_ptr<Fiber> from, std::shared_ptr<Fiber> to) {
    ASSERT_MSG(from != nullptr, "Yielding fiber is null!");
    ASSERT_MSG(to != nullptr, "Next fiber is null!");
    to->impl->guard.lock();
    to->impl->previous_fiber = from;
    auto transfer = boost::context::detail::jump_fcontext(to->impl->context, to.get());
    ASSERT(from->impl->previous_fiber != nullptr);
    from->impl->previous_fiber->impl->context = transfer.fctx;
    from->impl->previous_fiber->impl->guard.unlock();
    from->impl->previous_fiber.reset();
void Fiber::YieldTo(std::weak_ptr<Fiber> weak_from, Fiber& to) {
    to.impl->guard.lock();
    to.impl->previous_fiber = weak_from.lock();

    auto transfer = boost::context::detail::jump_fcontext(to.impl->context, &to);

    // "from" might no longer be valid if the thread was killed
    if (auto from = weak_from.lock()) {
        ASSERT(from->impl->previous_fiber != nullptr);
        from->impl->previous_fiber->impl->context = transfer.fctx;
        from->impl->previous_fiber->impl->guard.unlock();
        from->impl->previous_fiber.reset();
    }
}

std::shared_ptr<Fiber> Fiber::ThreadToFiber() {
@@ -41,7 +41,7 @@ public:

    /// Yields control from Fiber 'from' to Fiber 'to'
    /// Fiber 'from' must be the currently running fiber.
    static void YieldTo(std::shared_ptr<Fiber> from, std::shared_ptr<Fiber> to);
    static void YieldTo(std::weak_ptr<Fiber> weak_from, Fiber& to);
    [[nodiscard]] static std::shared_ptr<Fiber> ThreadToFiber();

    void SetRewindPoint(std::function<void(void*)>&& rewind_func, void* rewind_param);
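`Fiber::YieldTo` now takes the source fiber as a `std::weak_ptr` and the target by reference, so the callee can detect that the source fiber's thread died while it was suspended. A hedged sketch of a call site; it assumes the header lives at `common/fiber.h` and that `worker` was constructed elsewhere with the fiber's normal entry-point constructor, neither of which is shown in this hunk:

```cpp
#include <memory>

#include "common/fiber.h"

// Illustrative call site, not part of this change.
void YieldOnceTo(const std::shared_ptr<Common::Fiber>& worker) {
    // Adopt the calling thread as a fiber so it has something to yield from.
    std::shared_ptr<Common::Fiber> thread_fiber = Common::Fiber::ThreadToFiber();

    // shared_ptr converts implicitly to the weak_ptr parameter; the target is
    // passed by reference and must outlive the yield.
    Common::Fiber::YieldTo(thread_fiber, *worker);
}
```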
@@ -21,11 +21,11 @@
#include "common/logging/backend.h"
#include "common/logging/log.h"
#include "common/logging/text_formatter.h"
#include "common/settings.h"
#include "common/string_util.h"
#include "common/threadsafe_queue.h"
#include "core/settings.h"

namespace Log {
namespace Common::Log {

/**
 * Static state as a singleton.
@@ -37,8 +37,11 @@ public:
        return backend;
    }

    Impl(Impl const&) = delete;
    const Impl& operator=(Impl const&) = delete;
    Impl(const Impl&) = delete;
    Impl& operator=(const Impl&) = delete;

    Impl(Impl&&) = delete;
    Impl& operator=(Impl&&) = delete;

    void PushEntry(Class log_class, Level log_level, const char* filename, unsigned int line_num,
                   const char* function, std::string message) {
@@ -132,7 +135,7 @@ private:
    std::mutex writing_mutex;
    std::thread backend_thread;
    std::vector<std::unique_ptr<Backend>> backends;
    Common::MPSCQueue<Log::Entry> message_queue;
    MPSCQueue<Entry> message_queue;
    Filter filter;
    std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
};
@@ -146,16 +149,16 @@ void ColorConsoleBackend::Write(const Entry& entry) {
}

FileBackend::FileBackend(const std::string& filename) : bytes_written(0) {
    if (Common::FS::Exists(filename + ".old.txt")) {
        Common::FS::Delete(filename + ".old.txt");
    if (FS::Exists(filename + ".old.txt")) {
        FS::Delete(filename + ".old.txt");
    }
    if (Common::FS::Exists(filename)) {
        Common::FS::Rename(filename, filename + ".old.txt");
    if (FS::Exists(filename)) {
        FS::Rename(filename, filename + ".old.txt");
    }

    // _SH_DENYWR allows read only access to the file for other programs.
    // It is #defined to 0 on other platforms
    file = Common::FS::IOFile(filename, "w", _SH_DENYWR);
    file = FS::IOFile(filename, "w", _SH_DENYWR);
}

void FileBackend::Write(const Entry& entry) {
@@ -182,7 +185,7 @@ void FileBackend::Write(const Entry& entry) {

void DebuggerBackend::Write(const Entry& entry) {
#ifdef _WIN32
    ::OutputDebugStringW(Common::UTF8ToUTF16W(FormatLogMessage(entry).append(1, '\n')).c_str());
    ::OutputDebugStringW(UTF8ToUTF16W(FormatLogMessage(entry).append(1, '\n')).c_str());
#endif
}

@@ -212,6 +215,7 @@ void DebuggerBackend::Write(const Entry& entry) {
    SUB(Service, ARP) \
    SUB(Service, BCAT) \
    SUB(Service, BPC) \
    SUB(Service, BGTC) \
    SUB(Service, BTDRV) \
    SUB(Service, BTM) \
    SUB(Service, Capture) \
@@ -341,4 +345,4 @@ void FmtLogMessageImpl(Class log_class, Level log_level, const char* filename,
    instance.PushEntry(log_class, log_level, filename, line_num, function,
                       fmt::vformat(format, args));
}
} // namespace Log
} // namespace Common::Log
@@ -11,7 +11,7 @@
#include "common/logging/filter.h"
#include "common/logging/log.h"

namespace Log {
namespace Common::Log {

class Filter;

@@ -135,4 +135,4 @@ const char* GetLevelName(Level log_level);
 * never get the message
 */
void SetGlobalFilter(const Filter& filter);
} // namespace Log
} // namespace Common::Log
@@ -7,7 +7,7 @@
#include "common/logging/filter.h"
#include "common/string_util.h"

namespace Log {
namespace Common::Log {
namespace {
template <typename It>
Level GetLevelByName(const It begin, const It end) {
@@ -103,4 +103,4 @@ bool Filter::IsDebug() const {
    });
}

} // namespace Log
} // namespace Common::Log
@@ -9,7 +9,7 @@
#include <string_view>
#include "common/logging/log.h"

namespace Log {
namespace Common::Log {

/**
 * Implements a log message filter which allows different log classes to have different minimum
@@ -51,4 +51,4 @@ public:
private:
    std::array<Level, static_cast<std::size_t>(Class::Count)> class_levels;
};
} // namespace Log
} // namespace Common::Log
@@ -7,7 +7,7 @@
#include <fmt/format.h>
#include "common/common_types.h"

namespace Log {
namespace Common::Log {

// trims up to and including the last of ../, ..\, src/, src\ in a string
constexpr const char* TrimSourcePath(std::string_view source) {
@@ -66,6 +66,7 @@ enum class Class : ClassType {
    Service_ARP, ///< The ARP service
    Service_Audio, ///< The Audio (Audio control) service
    Service_BCAT, ///< The BCAT service
    Service_BGTC, ///< The BGTC (Background Task Controller) service
    Service_BPC, ///< The BPC service
    Service_BTDRV, ///< The Bluetooth driver service
    Service_BTM, ///< The BTM service
@@ -147,28 +148,34 @@ void FmtLogMessage(Class log_class, Level log_level, const char* filename, unsig
                   fmt::make_format_args(args...));
}

} // namespace Log
} // namespace Common::Log

#ifdef _DEBUG
#define LOG_TRACE(log_class, ...) \
    ::Log::FmtLogMessage(::Log::Class::log_class, ::Log::Level::Trace, \
                         ::Log::TrimSourcePath(__FILE__), __LINE__, __func__, __VA_ARGS__)
    Common::Log::FmtLogMessage(Common::Log::Class::log_class, Common::Log::Level::Trace, \
                               Common::Log::TrimSourcePath(__FILE__), __LINE__, __func__, \
                               __VA_ARGS__)
#else
#define LOG_TRACE(log_class, fmt, ...) (void(0))
#endif

#define LOG_DEBUG(log_class, ...) \
    ::Log::FmtLogMessage(::Log::Class::log_class, ::Log::Level::Debug, \
                         ::Log::TrimSourcePath(__FILE__), __LINE__, __func__, __VA_ARGS__)
    Common::Log::FmtLogMessage(Common::Log::Class::log_class, Common::Log::Level::Debug, \
                               Common::Log::TrimSourcePath(__FILE__), __LINE__, __func__, \
                               __VA_ARGS__)
#define LOG_INFO(log_class, ...) \
    ::Log::FmtLogMessage(::Log::Class::log_class, ::Log::Level::Info, \
                         ::Log::TrimSourcePath(__FILE__), __LINE__, __func__, __VA_ARGS__)
    Common::Log::FmtLogMessage(Common::Log::Class::log_class, Common::Log::Level::Info, \
                               Common::Log::TrimSourcePath(__FILE__), __LINE__, __func__, \
                               __VA_ARGS__)
#define LOG_WARNING(log_class, ...) \
    ::Log::FmtLogMessage(::Log::Class::log_class, ::Log::Level::Warning, \
                         ::Log::TrimSourcePath(__FILE__), __LINE__, __func__, __VA_ARGS__)
    Common::Log::FmtLogMessage(Common::Log::Class::log_class, Common::Log::Level::Warning, \
                               Common::Log::TrimSourcePath(__FILE__), __LINE__, __func__, \
                               __VA_ARGS__)
#define LOG_ERROR(log_class, ...) \
    ::Log::FmtLogMessage(::Log::Class::log_class, ::Log::Level::Error, \
                         ::Log::TrimSourcePath(__FILE__), __LINE__, __func__, __VA_ARGS__)
    Common::Log::FmtLogMessage(Common::Log::Class::log_class, Common::Log::Level::Error, \
                               Common::Log::TrimSourcePath(__FILE__), __LINE__, __func__, \
                               __VA_ARGS__)
#define LOG_CRITICAL(log_class, ...) \
    ::Log::FmtLogMessage(::Log::Class::log_class, ::Log::Level::Critical, \
                         ::Log::TrimSourcePath(__FILE__), __LINE__, __func__, __VA_ARGS__)
    Common::Log::FmtLogMessage(Common::Log::Class::log_class, Common::Log::Level::Critical, \
                               Common::Log::TrimSourcePath(__FILE__), __LINE__, __func__, \
                               __VA_ARGS__)
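Call sites are unaffected by the move into `Common::Log`: the macros keep their old spelling and simply expand to the fully qualified functions. A minimal usage sketch (the function and message are illustrative):

```cpp
#include "common/logging/log.h"

void HandleBackgroundTaskCommand(int command_id) {
    // Service_BGTC is one of the log classes added in this change.
    LOG_DEBUG(Service_BGTC, "received command {}", command_id);

    if (command_id < 0) {
        LOG_ERROR(Service_BGTC, "invalid command {}", command_id);
    }
}
```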
@@ -16,7 +16,7 @@
#include "common/logging/text_formatter.h"
#include "common/string_util.h"

namespace Log {
namespace Common::Log {

std::string FormatLogMessage(const Entry& entry) {
    unsigned int time_seconds = static_cast<unsigned int>(entry.timestamp.count() / 1000000);
@@ -108,4 +108,4 @@ void PrintColoredMessage(const Entry& entry) {
#undef ESC
#endif
}
} // namespace Log
} // namespace Common::Log
@@ -7,7 +7,7 @@
#include <cstddef>
#include <string>

namespace Log {
namespace Common::Log {

struct Entry;

@@ -17,4 +17,4 @@ std::string FormatLogMessage(const Entry& entry);
void PrintMessage(const Entry& entry);
/// Prints the same message as `PrintMessage`, but colored according to the severity level.
void PrintColoredMessage(const Entry& entry);
} // namespace Log
} // namespace Common::Log
@@ -12,27 +12,41 @@

#include "common/common_funcs.h"

// Generic function to get last error message.
// Call directly after the command or use the error num.
// This function might change the error code.
std::string GetLastErrorMsg() {
    static constexpr std::size_t buff_size = 255;
    char err_str[buff_size];

std::string NativeErrorToString(int e) {
#ifdef _WIN32
    FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, nullptr, GetLastError(),
                   MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), err_str, buff_size, nullptr);
    return std::string(err_str, buff_size);
#elif defined(__GLIBC__) && (_GNU_SOURCE || (_POSIX_C_SOURCE < 200112L && _XOPEN_SOURCE < 600))
    LPSTR err_str;

    DWORD res = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
                                   FORMAT_MESSAGE_IGNORE_INSERTS,
                               nullptr, e, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                               reinterpret_cast<LPSTR>(&err_str), 1, nullptr);
    if (!res) {
        return "(FormatMessageA failed to format error)";
    }
    std::string ret(err_str);
    LocalFree(err_str);
    return ret;
#else
    char err_str[255];
#if defined(__GLIBC__) && (_GNU_SOURCE || (_POSIX_C_SOURCE < 200112L && _XOPEN_SOURCE < 600))
    // Thread safe (GNU-specific)
    const char* str = strerror_r(errno, err_str, buff_size);
    const char* str = strerror_r(e, err_str, sizeof(err_str));
    return std::string(str);
#else
    // Thread safe (XSI-compliant)
    const int success = strerror_r(errno, err_str, buff_size);
    if (success != 0) {
        return {};
    int second_err = strerror_r(e, err_str, sizeof(err_str));
    if (second_err != 0) {
        return "(strerror_r failed to format error)";
    }
    return std::string(err_str);
#endif // GLIBC etc.
#endif // _WIN32
}

std::string GetLastErrorMsg() {
#ifdef _WIN32
    return NativeErrorToString(GetLastError());
#else
    return NativeErrorToString(errno);
#endif
}
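`GetLastErrorMsg()` is now a thin wrapper that feeds `GetLastError()`/`errno` into the new `NativeErrorToString(int)`, so callers can also format an error code they saved earlier. A small sketch using plain `stderr` output to stay self-contained:

```cpp
#include <cerrno>
#include <cstdio>
#include <string>

#include "common/common_funcs.h"

void OpenOrReport(const char* path) {
    std::FILE* const file = std::fopen(path, "rb");
    if (file == nullptr) {
        // Save errno before any other call can overwrite it, then format it later.
        const int saved_error = errno;
        std::fprintf(stderr, "Failed to open %s: %s\n", path,
                     NativeErrorToString(saved_error).c_str());
        return;
    }
    std::fclose(file);
}
```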
@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

namespace Common {

/// Configure platform specific flags for Nvidia's driver
@@ -1,4 +1,4 @@
// Copyright 2014 Citra Emulator Project
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

@@ -7,10 +7,7 @@
#include "common/assert.h"
#include "common/file_util.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/service/hid/hid.h"
#include "core/settings.h"
#include "video_core/renderer_base.h"
#include "common/settings.h"

namespace Settings {

@@ -32,14 +29,6 @@ std::string GetTimeZoneString() {
    return timezones[time_zone_index];
}

void Apply(Core::System& system) {
    if (system.IsPoweredOn()) {
        system.Renderer().RefreshBaseSettings();
    }

    Service::HID::ReloadInputDevices();
}

void LogSettings() {
    const auto log_setting = [](std::string_view name, const auto& value) {
        LOG_INFO(Config, "{}: {}", name, value);
@@ -1,4 +1,4 @@
// Copyright 2014 Citra Emulator Project
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

@@ -11,16 +11,13 @@
#include <optional>
#include <string>
#include <vector>
#include "common/common_types.h"
#include "input_common/settings.h"

namespace Core {
class System;
}
#include "common/common_types.h"
#include "common/settings_input.h"

namespace Settings {

enum class RendererBackend {
enum class RendererBackend : u32 {
    OpenGL = 0,
    Vulkan = 1,
};
@@ -31,7 +28,7 @@ enum class GPUAccuracy : u32 {
    Extreme = 2,
};

enum class CPUAccuracy {
enum class CPUAccuracy : u32 {
    Accurate = 0,
    Unsafe = 1,
    DebugMode = 2,
@@ -139,6 +136,7 @@ struct Values {
    Setting<int> vulkan_device;

    Setting<u16> resolution_factor{1};
    Setting<int> fullscreen_mode;
    Setting<int> aspect_ratio;
    Setting<int> max_anisotropy;
    Setting<bool> use_frame_limit;
@@ -222,6 +220,8 @@ struct Values {
    bool quest_flag;
    bool disable_macro_jit;
    bool extended_logging;
    bool use_debug_asserts;
    bool use_auto_stub;

    // Miscellaneous
    std::string log_filter;
@@ -253,7 +253,6 @@ float Volume();

std::string GetTimeZoneString();

void Apply(Core::System& system);
void LogSettings();

// Restore the global state of all applicable settings in the Values struct
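With the settings moved from `core/settings.h` to `common/settings.h`, common-layer code can now read the flags directly. A tiny sketch using one of the switches added in this change:

```cpp
#include "common/settings.h"

// Mirrors the check assert_handle_failure() performs with the new flag.
bool ShouldCrashOnFailedAssert() {
    return Settings::values.use_debug_asserts;
}
```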
@@ -2,7 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "input_common/settings.h"
#include "common/settings_input.h"

namespace Settings {
namespace NativeButton {
@@ -6,6 +6,7 @@

#include <array>
#include <string>

#include "common/common_types.h"

namespace Settings {
@@ -83,11 +83,15 @@ public:
        return true;
    }

    T PopWait() {
    void Wait() {
        if (Empty()) {
            std::unique_lock lock{cv_mutex};
            cv.wait(lock, [this]() { return !Empty(); });
        }
    }

    T PopWait() {
        Wait();
        T t;
        Pop(t);
        return t;
@@ -156,6 +160,10 @@ public:
        return spsc_queue.Pop(t);
    }

    void Wait() {
        spsc_queue.Wait();
    }

    T PopWait() {
        return spsc_queue.PopWait();
    }
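`Wait()` exposes the blocking part of `PopWait()` on its own, so a consumer can block for work without popping it in the same call, and `PopWait()` itself now blocks instead of spinning. A hedged sketch; it assumes `Push()` exists on `Common::MPSCQueue` (the logging backend relies on it, but it is not shown in this hunk):

```cpp
#include <cstdio>
#include <thread>

#include "common/threadsafe_queue.h"

int main() {
    Common::MPSCQueue<int> queue;

    std::thread producer([&queue] {
        for (int i = 0; i < 5; ++i) {
            queue.Push(i); // Assumed existing API, not part of this diff.
        }
    });

    for (int received = 0; received < 5; ++received) {
        // Blocks via Wait() while the queue is empty, then pops one element.
        std::printf("popped %d\n", queue.PopWait());
    }

    producer.join();
    return 0;
}
```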
@@ -98,4 +98,24 @@ namespace Common {
#endif
}

// This function divides a u128 by a u32 value and produces two u64 values:
// the result of division and the remainder
[[nodiscard]] static inline std::pair<u64, u64> Divide128On32(u128 dividend, u32 divisor) {
    u64 remainder = dividend[0] % divisor;
    u64 accum = dividend[0] / divisor;
    if (dividend[1] == 0)
        return {accum, remainder};
    // We ignore dividend[1] / divisor as that overflows
    const u64 first_segment = (dividend[1] % divisor) << 32;
    accum += (first_segment / divisor) << 32;
    const u64 second_segment = (first_segment % divisor) << 32;
    accum += (second_segment / divisor);
    remainder += second_segment % divisor;
    if (remainder >= divisor) {
        accum++;
        remainder -= divisor;
    }
    return {accum, remainder};
}

} // namespace Common
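A worked example of the new helper, mirroring how the wall clock below uses it: multiply into a 128-bit intermediate first so `ns * frequency` cannot overflow 64 bits, then divide by 10^9. The header name `common/uint128.h` is assumed; it is not spelled out in this hunk.

```cpp
#include <cstdio>

#include "common/uint128.h"

int main() {
    const u64 elapsed_ns = 2'500'000'000ULL;   // 2.5 seconds
    const u64 clock_frequency = 19'200'000ULL; // a 19.2 MHz counter

    const u128 product = Common::Multiply64Into128(elapsed_ns, clock_frequency);
    const auto [cycles, remainder] = Common::Divide128On32(product, 1'000'000'000);

    // 2.5 s at 19.2 MHz is exactly 48'000'000 cycles, remainder 0.
    std::printf("%llu cycles, remainder %llu\n",
                static_cast<unsigned long long>(cycles),
                static_cast<unsigned long long>(remainder));
    return 0;
}
```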
@@ -20,9 +20,7 @@ using base_time_point = std::chrono::time_point<base_timer>;
class StandardWallClock final : public WallClock {
public:
    explicit StandardWallClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequency_)
        : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, false),
          emulated_clock_factor{GetFixedPoint64Factor(emulated_clock_frequency, 1000000000)},
          emulated_cpu_factor{GetFixedPoint64Factor(emulated_cpu_frequency, 1000000000)} {
        : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, false) {
        start_time = base_timer::now();
    }

@@ -45,11 +43,16 @@ public:
    }

    u64 GetClockCycles() override {
        return MultiplyHigh(GetTimeNS().count(), emulated_clock_factor);
        std::chrono::nanoseconds time_now = GetTimeNS();
        const u128 temporary =
            Common::Multiply64Into128(time_now.count(), emulated_clock_frequency);
        return Common::Divide128On32(temporary, 1000000000).first;
    }

    u64 GetCPUCycles() override {
        return MultiplyHigh(GetTimeNS().count(), emulated_cpu_factor);
        std::chrono::nanoseconds time_now = GetTimeNS();
        const u128 temporary = Common::Multiply64Into128(time_now.count(), emulated_cpu_frequency);
        return Common::Divide128On32(temporary, 1000000000).first;
    }

    void Pause([[maybe_unused]] bool is_paused) override {
@@ -58,8 +61,6 @@ public:

private:
    base_time_point start_time;
    const u64 emulated_clock_factor;
    const u64 emulated_cpu_factor;
};

#ifdef ARCHITECTURE_x86_64
@@ -141,6 +141,9 @@ add_library(core STATIC
    hardware_interrupt_manager.h
    hle/ipc.h
    hle/ipc_helpers.h
    hle/kernel/board/nintendo/nx/k_system_control.cpp
    hle/kernel/board/nintendo/nx/k_system_control.h
    hle/kernel/board/nintendo/nx/secure_monitor.h
    hle/kernel/client_port.cpp
    hle/kernel/client_port.h
    hle/kernel/client_session.cpp
@@ -169,9 +172,13 @@ add_library(core STATIC
    hle/kernel/k_memory_block.h
    hle/kernel/k_memory_block_manager.cpp
    hle/kernel/k_memory_block_manager.h
    hle/kernel/k_memory_layout.cpp
    hle/kernel/k_memory_layout.board.nintendo_nx.cpp
    hle/kernel/k_memory_layout.h
    hle/kernel/k_memory_manager.cpp
    hle/kernel/k_memory_manager.h
    hle/kernel/k_memory_region.h
    hle/kernel/k_memory_region_type.h
    hle/kernel/k_page_bitmap.h
    hle/kernel/k_page_heap.cpp
    hle/kernel/k_page_heap.h
@@ -196,11 +203,11 @@ add_library(core STATIC
    hle/kernel/k_spin_lock.h
    hle/kernel/k_synchronization_object.cpp
    hle/kernel/k_synchronization_object.h
    hle/kernel/k_system_control.cpp
    hle/kernel/k_system_control.h
    hle/kernel/k_thread.cpp
    hle/kernel/k_thread.h
    hle/kernel/k_thread_queue.h
    hle/kernel/k_trace.h
    hle/kernel/k_writable_event.cpp
    hle/kernel/k_writable_event.h
    hle/kernel/kernel.cpp
@@ -614,8 +621,6 @@ add_library(core STATIC
    perf_stats.h
    reporter.cpp
    reporter.h
    settings.cpp
    settings.h
    telemetry_session.cpp
    telemetry_session.h
    tools/freezer.cpp
@@ -666,7 +671,7 @@ endif()
create_target_directory_groups(core)

target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls opus zip)
target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls Opus::Opus zip)

if (YUZU_ENABLE_BOXCAT)
    target_compile_definitions(core PRIVATE -DYUZU_ENABLE_BOXCAT)
@@ -10,6 +10,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/page_table.h"
#include "common/settings.h"
#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_cp15.h"
@@ -18,7 +19,6 @@
#include "core/core_timing.h"
#include "core/hle/kernel/svc.h"
#include "core/memory.h"
#include "core/settings.h"

namespace Core {

@@ -114,18 +114,17 @@ public:
    static constexpr u64 minimum_run_cycles = 1000U;
};

std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table,
                                                             std::size_t address_space_bits) const {
std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable* page_table) const {
    Dynarmic::A32::UserConfig config;
    config.callbacks = cb.get();
    // TODO(bunnei): Implement page table for 32-bit
    // config.page_table = &page_table.pointers;
    config.coprocessors[15] = cp15;
    config.define_unpredictable_behaviour = true;
    static constexpr std::size_t PAGE_BITS = 12;
    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
    config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
        page_table.pointers.data());
    if (page_table) {
        config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
            page_table->pointers.data());
    }
    config.absolute_offset_page_table = true;
    config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
    config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
@@ -138,6 +137,10 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
    // Timing
    config.wall_clock_cntpct = uses_wall_clock;

    // Code cache size
    config.code_cache_size = 512 * 1024 * 1024;
    config.far_code_offset = 256 * 1024 * 1024;

    // Safe optimizations
    if (Settings::values.cpu_accuracy == Settings::CPUAccuracy::DebugMode) {
        if (!Settings::values.cpuopt_page_tables) {
@@ -201,7 +204,8 @@ ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, CPUInterrupts& interrupt_handle
    : ARM_Interface{system, interrupt_handlers, uses_wall_clock},
      cb(std::make_unique<DynarmicCallbacks32>(*this)),
      cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index},
      exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
      exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)},
      jit(MakeJit(nullptr)) {}

ARM_Dynarmic_32::~ARM_Dynarmic_32() = default;

@@ -256,9 +260,6 @@ void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
}

void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
    if (!jit) {
        return;
    }
    Dynarmic::A32::Context context;
    jit->SaveContext(context);
    ctx.cpu_registers = context.Regs();
@@ -268,9 +269,6 @@ void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
}

void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
    if (!jit) {
        return;
    }
    Dynarmic::A32::Context context;
    context.Regs() = ctx.cpu_registers;
    context.ExtRegs() = ctx.extension_registers;
@@ -284,35 +282,31 @@ void ARM_Dynarmic_32::PrepareReschedule() {
}

void ARM_Dynarmic_32::ClearInstructionCache() {
    if (!jit) {
        return;
    }
    jit->ClearCache();
}

void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
    if (!jit) {
        return;
    }
    jit->InvalidateCacheRange(static_cast<u32>(addr), size);
}

void ARM_Dynarmic_32::ClearExclusiveState() {
    if (!jit) {
        return;
    }
    jit->ClearExclusiveState();
}

void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
                                       std::size_t new_address_space_size_in_bits) {
    ThreadContext32 ctx{};
    SaveContext(ctx);

    auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
    auto iter = jit_cache.find(key);
    if (iter != jit_cache.end()) {
        jit = iter->second;
        LoadContext(ctx);
        return;
    }
    jit = MakeJit(page_table, new_address_space_size_in_bits);
    jit = MakeJit(&page_table);
    LoadContext(ctx);
    jit_cache.emplace(key, jit);
}
@@ -68,8 +68,7 @@ public:
                          std::size_t new_address_space_size_in_bits) override;

private:
    std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable& page_table,
                                                std::size_t address_space_bits) const;
    std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable* page_table) const;

    using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
    using JitCacheType =
@@ -80,10 +79,10 @@ private:

    std::unique_ptr<DynarmicCallbacks32> cb;
    JitCacheType jit_cache;
    std::shared_ptr<Dynarmic::A32::Jit> jit;
    std::shared_ptr<DynarmicCP15> cp15;
    std::size_t core_index;
    DynarmicExclusiveMonitor& exclusive_monitor;
    std::shared_ptr<Dynarmic::A32::Jit> jit;
};

} // namespace Core
@@ -9,6 +9,7 @@
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/page_table.h"
|
||||
#include "common/settings.h"
|
||||
#include "core/arm/cpu_interrupt_handler.h"
|
||||
#include "core/arm/dynarmic/arm_dynarmic_64.h"
|
||||
#include "core/arm/dynarmic/arm_exclusive_monitor.h"
|
||||
@@ -19,7 +20,6 @@
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/svc.h"
|
||||
#include "core/memory.h"
|
||||
#include "core/settings.h"
|
||||
|
||||
namespace Core {
|
||||
|
||||
@@ -142,7 +142,7 @@ public:
|
||||
static constexpr u64 minimum_run_cycles = 1000U;
|
||||
};
|
||||
|
||||
std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table,
|
||||
std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable* page_table,
|
||||
std::size_t address_space_bits) const {
|
||||
Dynarmic::A64::UserConfig config;
|
||||
|
||||
@@ -150,13 +150,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
|
||||
config.callbacks = cb.get();
|
||||
|
||||
// Memory
|
||||
config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
|
||||
config.page_table_address_space_bits = address_space_bits;
|
||||
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
|
||||
config.silently_mirror_page_table = false;
|
||||
config.absolute_offset_page_table = true;
|
||||
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
|
||||
config.only_detect_misalignment_via_page_table_on_page_boundary = true;
|
||||
if (page_table) {
|
||||
config.page_table = reinterpret_cast<void**>(page_table->pointers.data());
|
||||
config.page_table_address_space_bits = address_space_bits;
|
||||
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
|
||||
config.silently_mirror_page_table = false;
|
||||
config.absolute_offset_page_table = true;
|
||||
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
|
||||
config.only_detect_misalignment_via_page_table_on_page_boundary = true;
|
||||
}
|
||||
|
||||
// Multi-process state
|
||||
config.processor_id = core_index;
|
||||
@@ -175,6 +177,10 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
|
||||
// Timing
|
||||
config.wall_clock_cntpct = uses_wall_clock;
|
||||
|
||||
// Code cache size
|
||||
config.code_cache_size = 512 * 1024 * 1024;
|
||||
config.far_code_offset = 256 * 1024 * 1024;
|
||||
|
||||
// Safe optimizations
|
||||
if (Settings::values.cpu_accuracy == Settings::CPUAccuracy::DebugMode) {
|
||||
if (!Settings::values.cpuopt_page_tables) {
|
||||
@@ -237,7 +243,8 @@ ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, CPUInterrupts& interrupt_handle
|
||||
std::size_t core_index)
|
||||
: ARM_Interface{system, interrupt_handlers, uses_wall_clock},
|
||||
cb(std::make_unique<DynarmicCallbacks64>(*this)), core_index{core_index},
|
||||
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
|
||||
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)},
|
||||
jit(MakeJit(nullptr, 48)) {}
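MakeJit now takes the page table by pointer so the constructor can build a JIT before any process page table exists (the MakeJit(nullptr, 48) call above); the page-table-backed fast-memory fields are only configured when a page table is actually supplied. A minimal sketch of that guard pattern follows, where MakeBaseConfig is a hypothetical helper rather than code from this change; Dynarmic::A64::UserConfig and Common::PageTable are the real types used above.

// Sketch only: mirrors the nullable-page-table handling added to ARM_Dynarmic_64::MakeJit.
Dynarmic::A64::UserConfig MakeBaseConfig(Common::PageTable* page_table,
                                         std::size_t address_space_bits) {
    Dynarmic::A64::UserConfig config{};
    if (page_table != nullptr) {
        // With a page table, enable page-table-backed fast memory access.
        config.page_table = reinterpret_cast<void**>(page_table->pointers.data());
        config.page_table_address_space_bits = address_space_bits;
    }
    // With page_table == nullptr (as in the eager constructor JIT), these fields keep
    // their defaults and memory accesses go through the callback interface instead.
    return config;
}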
|
||||
|
||||
ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
|
||||
|
||||
@@ -294,9 +301,6 @@ void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
ctx.cpu_registers = jit->GetRegisters();
|
||||
ctx.sp = jit->GetSP();
|
||||
ctx.pc = jit->GetPC();
|
||||
@@ -308,9 +312,6 @@ void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->SetRegisters(ctx.cpu_registers);
|
||||
jit->SetSP(ctx.sp);
|
||||
jit->SetPC(ctx.pc);
|
||||
@@ -326,35 +327,31 @@ void ARM_Dynarmic_64::PrepareReschedule() {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::ClearInstructionCache() {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->ClearCache();
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->InvalidateCacheRange(addr, size);
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::ClearExclusiveState() {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->ClearExclusiveState();
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
|
||||
std::size_t new_address_space_size_in_bits) {
|
||||
ThreadContext64 ctx{};
|
||||
SaveContext(ctx);
|
||||
|
||||
auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
|
||||
auto iter = jit_cache.find(key);
|
||||
if (iter != jit_cache.end()) {
|
||||
jit = iter->second;
|
||||
LoadContext(ctx);
|
||||
return;
|
||||
}
|
||||
jit = MakeJit(page_table, new_address_space_size_in_bits);
|
||||
jit = MakeJit(&page_table, new_address_space_size_in_bits);
|
||||
LoadContext(ctx);
|
||||
jit_cache.emplace(key, jit);
|
||||
}
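PageTableChanged keys the cached JIT instances on the (page table, address-space bits) pair, so switching back to an address space that was already seen reuses the compiled code instead of recreating the JIT. A hedged sketch of the same lookup-or-create flow, with std::map standing in for the repository's JitCacheType and GetOrMakeJit as an illustrative free function (not part of the change):

using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
std::map<JitCacheKey, std::shared_ptr<Dynarmic::A64::Jit>> jit_cache;

std::shared_ptr<Dynarmic::A64::Jit> GetOrMakeJit(Common::PageTable& page_table,
                                                 std::size_t address_space_bits) {
    const JitCacheKey key{&page_table, address_space_bits};
    if (const auto iter = jit_cache.find(key); iter != jit_cache.end()) {
        return iter->second; // reuse the JIT compiled for this address space
    }
    auto jit = MakeJit(&page_table, address_space_bits); // member of ARM_Dynarmic_64 above
    jit_cache.emplace(key, jit);
    return jit;
}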
|
||||
|
||||
|
||||
@@ -61,7 +61,7 @@ public:
|
||||
std::size_t new_address_space_size_in_bits) override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable& page_table,
|
||||
std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable* page_table,
|
||||
std::size_t address_space_bits) const;
|
||||
|
||||
using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
|
||||
@@ -71,10 +71,11 @@ private:
|
||||
friend class DynarmicCallbacks64;
|
||||
std::unique_ptr<DynarmicCallbacks64> cb;
|
||||
JitCacheType jit_cache;
|
||||
std::shared_ptr<Dynarmic::A64::Jit> jit;
|
||||
|
||||
std::size_t core_index;
|
||||
DynarmicExclusiveMonitor& exclusive_monitor;
|
||||
|
||||
std::shared_ptr<Dynarmic::A64::Jit> jit;
|
||||
};
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#include "common/file_util.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/settings.h"
|
||||
#include "common/string_util.h"
|
||||
#include "core/arm/exclusive_monitor.h"
|
||||
#include "core/core.h"
|
||||
@@ -36,6 +37,7 @@
|
||||
#include "core/hle/service/apm/controller.h"
|
||||
#include "core/hle/service/filesystem/filesystem.h"
|
||||
#include "core/hle/service/glue/manager.h"
|
||||
#include "core/hle/service/hid/hid.h"
|
||||
#include "core/hle/service/service.h"
|
||||
#include "core/hle/service/sm/sm.h"
|
||||
#include "core/hle/service/time/time_manager.h"
|
||||
@@ -45,7 +47,6 @@
|
||||
#include "core/network/network.h"
|
||||
#include "core/perf_stats.h"
|
||||
#include "core/reporter.h"
|
||||
#include "core/settings.h"
|
||||
#include "core/telemetry_session.h"
|
||||
#include "core/tools/freezer.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
@@ -296,28 +297,20 @@ struct System::Impl {
|
||||
exit_lock = false;
|
||||
|
||||
if (gpu_core) {
|
||||
gpu_core->WaitIdle();
|
||||
gpu_core->ShutDown();
|
||||
}
|
||||
|
||||
// Shutdown emulation session
|
||||
services.reset();
|
||||
service_manager.reset();
|
||||
cheat_engine.reset();
|
||||
telemetry_session.reset();
|
||||
|
||||
// Close all CPU/threading state
|
||||
cpu_manager.Shutdown();
|
||||
|
||||
// Shutdown kernel and core timing
|
||||
time_manager.Shutdown();
|
||||
core_timing.Shutdown();
|
||||
kernel.Shutdown();
|
||||
|
||||
// Close app loader
|
||||
app_loader.reset();
|
||||
gpu_core.reset();
|
||||
perf_stats.reset();
|
||||
|
||||
// Clear all applets
|
||||
kernel.Shutdown();
|
||||
applet_manager.ClearAll();
|
||||
|
||||
LOG_DEBUG(Core, "Shutdown OK");
|
||||
@@ -782,4 +775,12 @@ void System::ExecuteProgram(std::size_t program_index) {
|
||||
}
|
||||
}
|
||||
|
||||
void System::ApplySettings() {
|
||||
if (IsPoweredOn()) {
|
||||
Renderer().RefreshBaseSettings();
|
||||
}
|
||||
|
||||
Service::HID::ReloadInputDevices();
|
||||
}
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -388,6 +388,9 @@ public:
|
||||
*/
|
||||
void ExecuteProgram(std::size_t program_index);
|
||||
|
||||
/// Applies any changes to settings to this core instance.
|
||||
void ApplySettings();
|
||||
|
||||
private:
|
||||
System();
|
||||
|
||||
|
||||
@@ -148,7 +148,7 @@ void CpuManager::MultiCoreRunSuspendThread() {
|
||||
auto core = kernel.GetCurrentHostThreadID();
|
||||
auto& scheduler = *kernel.CurrentScheduler();
|
||||
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
|
||||
ASSERT(scheduler.ContextSwitchPending());
|
||||
ASSERT(core == kernel.GetCurrentHostThreadID());
|
||||
scheduler.RescheduleCurrentCore();
|
||||
@@ -245,7 +245,7 @@ void CpuManager::SingleCoreRunSuspendThread() {
|
||||
auto core = kernel.GetCurrentHostThreadID();
|
||||
auto& scheduler = *kernel.CurrentScheduler();
|
||||
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
|
||||
ASSERT(scheduler.ContextSwitchPending());
|
||||
ASSERT(core == kernel.GetCurrentHostThreadID());
|
||||
scheduler.RescheduleCurrentCore();
|
||||
@@ -271,7 +271,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
|
||||
scheduler.Unload(scheduler.GetCurrentThread());
|
||||
|
||||
auto& next_scheduler = kernel.Scheduler(current_core);
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext());
|
||||
}
|
||||
|
||||
// May have changed scheduler
|
||||
@@ -363,7 +363,7 @@ void CpuManager::RunThread(std::size_t core) {
|
||||
|
||||
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
|
||||
data.is_running = true;
|
||||
Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
|
||||
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
|
||||
data.is_running = false;
|
||||
data.is_paused = true;
|
||||
data.exit_barrier->Wait();
|
||||
|
||||
@@ -105,8 +105,6 @@ void AESCipher<Key, KeySize>::Transcode(const u8* src, std::size_t size, u8* des
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mbedtls_cipher_finish(context, nullptr, nullptr);
|
||||
}
|
||||
|
||||
template <typename Key, std::size_t KeySize>
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "common/file_util.h"
|
||||
#include "common/hex_util.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/settings.h"
|
||||
#include "common/string_util.h"
|
||||
#include "core/crypto/aes_util.h"
|
||||
#include "core/crypto/key_manager.h"
|
||||
@@ -32,7 +33,6 @@
|
||||
#include "core/file_sys/registered_cache.h"
|
||||
#include "core/hle/service/filesystem/filesystem.h"
|
||||
#include "core/loader/loader.h"
|
||||
#include "core/settings.h"
|
||||
|
||||
namespace Core::Crypto {
|
||||
namespace {
|
||||
|
||||
@@ -100,6 +100,14 @@ u64 NACP::GetDeviceSaveDataSize() const {
|
||||
return raw.device_save_data_size;
|
||||
}
|
||||
|
||||
u32 NACP::GetParentalControlFlag() const {
|
||||
return raw.parental_control;
|
||||
}
|
||||
|
||||
const std::array<u8, 0x20>& NACP::GetRatingAge() const {
|
||||
return raw.rating_age;
|
||||
}
|
||||
|
||||
std::vector<u8> NACP::GetRawBytes() const {
|
||||
std::vector<u8> out(sizeof(RawNACP));
|
||||
std::memcpy(out.data(), &raw, sizeof(RawNACP));
|
||||
|
||||
@@ -114,6 +114,8 @@ public:
|
||||
std::vector<u8> GetRawBytes() const;
|
||||
bool GetUserAccountSwitchLock() const;
|
||||
u64 GetDeviceSaveDataSize() const;
|
||||
u32 GetParentalControlFlag() const;
|
||||
const std::array<u8, 0x20>& GetRatingAge() const;
|
||||
|
||||
private:
|
||||
RawNACP raw{};
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#include "common/file_util.h"
|
||||
#include "common/hex_util.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/settings.h"
|
||||
#include "common/string_util.h"
|
||||
#include "core/core.h"
|
||||
#include "core/file_sys/common_funcs.h"
|
||||
@@ -25,7 +26,6 @@
|
||||
#include "core/loader/loader.h"
|
||||
#include "core/loader/nso.h"
|
||||
#include "core/memory/cheat_engine.h"
|
||||
#include "core/settings.h"
|
||||
|
||||
namespace FileSys {
|
||||
namespace {
|
||||
|
||||
@@ -14,7 +14,7 @@ namespace NgWord1Data {
|
||||
constexpr std::size_t NUMBER_WORD_TXT_FILES = 0x10;
|
||||
|
||||
// Should this archive replacement mysteriously not work on a future game, consider updating.
|
||||
constexpr std::array<u8, 4> VERSION_DAT{0x0, 0x0, 0x0, 0x19}; // 5.1.0 System Version
|
||||
constexpr std::array<u8, 4> VERSION_DAT{0x0, 0x0, 0x0, 0x20}; // 11.0.1 System Version
|
||||
|
||||
constexpr std::array<u8, 30> WORD_TXT{
|
||||
0xFE, 0xFF, 0x00, 0x5E, 0x00, 0x76, 0x00, 0x65, 0x00, 0x72, 0x00, 0x79, 0x00, 0x62, 0x00,
|
||||
@@ -43,7 +43,7 @@ namespace NgWord2Data {
|
||||
constexpr std::size_t NUMBER_AC_NX_FILES = 0x10;
|
||||
|
||||
// Should this archive replacement mysteriously not work on a future game, consider updating.
|
||||
constexpr std::array<u8, 4> VERSION_DAT{0x0, 0x0, 0x0, 0x15}; // 5.1.0 System Version
|
||||
constexpr std::array<u8, 4> VERSION_DAT{0x0, 0x0, 0x0, 0x1A}; // 11.0.1 System Version
|
||||
|
||||
constexpr std::array<u8, 0x2C> AC_NX_DATA{
|
||||
0x1F, 0x8B, 0x08, 0x08, 0xD5, 0x2C, 0x09, 0x5C, 0x04, 0x00, 0x61, 0x63, 0x72, 0x61, 0x77,
|
||||
|
||||
@@ -14,15 +14,15 @@ namespace SystemVersionData {
|
||||
|
||||
constexpr u8 VERSION_MAJOR = 11;
|
||||
constexpr u8 VERSION_MINOR = 0;
|
||||
constexpr u8 VERSION_MICRO = 0;
|
||||
constexpr u8 VERSION_MICRO = 1;
|
||||
|
||||
constexpr u8 REVISION_MAJOR = 5;
|
||||
constexpr u8 REVISION_MAJOR = 1;
|
||||
constexpr u8 REVISION_MINOR = 0;
|
||||
|
||||
constexpr char PLATFORM_STRING[] = "NX";
|
||||
constexpr char VERSION_HASH[] = "34197eba8810e2edd5e9dfcfbde7b340882e856d";
|
||||
constexpr char DISPLAY_VERSION[] = "11.0.0";
|
||||
constexpr char DISPLAY_TITLE[] = "NintendoSDK Firmware for NX 11.0.0-5.0";
|
||||
constexpr char VERSION_HASH[] = "69103fcb2004dace877094c2f8c29e6113be5dbf";
|
||||
constexpr char DISPLAY_VERSION[] = "11.0.1";
|
||||
constexpr char DISPLAY_TITLE[] = "NintendoSDK Firmware for NX 11.0.1-1.0";
|
||||
|
||||
} // namespace SystemVersionData
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/settings.h"
|
||||
#include "core/frontend/applets/profile_select.h"
|
||||
#include "core/hle/service/acc/profile_manager.h"
|
||||
#include "core/settings.h"
|
||||
|
||||
namespace Core::Frontend {
|
||||
|
||||
|
||||
@@ -4,9 +4,9 @@
|
||||
|
||||
#include <cmath>
|
||||
#include <mutex>
|
||||
#include "common/settings.h"
|
||||
#include "core/frontend/emu_window.h"
|
||||
#include "core/frontend/input.h"
|
||||
#include "core/settings.h"
|
||||
|
||||
namespace Core::Frontend {
|
||||
|
||||
|
||||
@@ -5,8 +5,8 @@
|
||||
#include <cmath>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/settings.h"
|
||||
#include "core/frontend/framebuffer_layout.h"
|
||||
#include "core/settings.h"
|
||||
|
||||
namespace Layout {
|
||||
|
||||
|
||||
@@ -0,0 +1,20 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// All architectures must define NumArchitectureDeviceRegions.
|
||||
constexpr inline const auto NumArchitectureDeviceRegions = 3;
|
||||
|
||||
constexpr inline const auto KMemoryRegionType_Uart =
|
||||
KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 0);
|
||||
constexpr inline const auto KMemoryRegionType_InterruptCpuInterface =
|
||||
KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 1)
|
||||
.SetAttribute(KMemoryRegionAttr_NoUserMap);
|
||||
constexpr inline const auto KMemoryRegionType_InterruptDistributor =
|
||||
KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 2)
|
||||
.SetAttribute(KMemoryRegionAttr_NoUserMap);
|
||||
static_assert(KMemoryRegionType_Uart.GetValue() == (0x1D));
|
||||
static_assert(KMemoryRegionType_InterruptCpuInterface.GetValue() ==
|
||||
(0x2D | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_InterruptDistributor.GetValue() ==
|
||||
(0x4D | KMemoryRegionAttr_NoUserMap));
|
||||
@@ -0,0 +1,52 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// All architectures must define NumBoardDeviceRegions.
|
||||
constexpr inline const auto NumBoardDeviceRegions = 6;
|
||||
// UNUSED: .Derive(NumBoardDeviceRegions, 0);
|
||||
constexpr inline const auto KMemoryRegionType_MemoryController =
|
||||
KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 1)
|
||||
.SetAttribute(KMemoryRegionAttr_NoUserMap);
|
||||
constexpr inline const auto KMemoryRegionType_MemoryController1 =
|
||||
KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 2)
|
||||
.SetAttribute(KMemoryRegionAttr_NoUserMap);
|
||||
constexpr inline const auto KMemoryRegionType_MemoryController0 =
|
||||
KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 3)
|
||||
.SetAttribute(KMemoryRegionAttr_NoUserMap);
|
||||
constexpr inline const auto KMemoryRegionType_PowerManagementController =
|
||||
KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 4).DeriveTransition();
|
||||
constexpr inline const auto KMemoryRegionType_LegacyLpsDevices =
|
||||
KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 5);
|
||||
static_assert(KMemoryRegionType_MemoryController.GetValue() ==
|
||||
(0x55 | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_MemoryController1.GetValue() ==
|
||||
(0x65 | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_MemoryController0.GetValue() ==
|
||||
(0x95 | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_PowerManagementController.GetValue() == (0x1A5));
|
||||
|
||||
static_assert(KMemoryRegionType_LegacyLpsDevices.GetValue() == 0xC5);
|
||||
|
||||
constexpr inline const auto NumLegacyLpsDevices = 7;
|
||||
constexpr inline const auto KMemoryRegionType_LegacyLpsExceptionVectors =
|
||||
KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 0);
|
||||
constexpr inline const auto KMemoryRegionType_LegacyLpsIram =
|
||||
KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 1);
|
||||
constexpr inline const auto KMemoryRegionType_LegacyLpsFlowController =
|
||||
KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 2);
|
||||
constexpr inline const auto KMemoryRegionType_LegacyLpsPrimaryICtlr =
|
||||
KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 3);
|
||||
constexpr inline const auto KMemoryRegionType_LegacyLpsSemaphore =
|
||||
KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 4);
|
||||
constexpr inline const auto KMemoryRegionType_LegacyLpsAtomics =
|
||||
KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 5);
|
||||
constexpr inline const auto KMemoryRegionType_LegacyLpsClkRst =
|
||||
KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 6);
|
||||
static_assert(KMemoryRegionType_LegacyLpsExceptionVectors.GetValue() == 0x3C5);
|
||||
static_assert(KMemoryRegionType_LegacyLpsIram.GetValue() == 0x5C5);
|
||||
static_assert(KMemoryRegionType_LegacyLpsFlowController.GetValue() == 0x6C5);
|
||||
static_assert(KMemoryRegionType_LegacyLpsPrimaryICtlr.GetValue() == 0x9C5);
|
||||
static_assert(KMemoryRegionType_LegacyLpsSemaphore.GetValue() == 0xAC5);
|
||||
static_assert(KMemoryRegionType_LegacyLpsAtomics.GetValue() == 0xCC5);
|
||||
static_assert(KMemoryRegionType_LegacyLpsClkRst.GetValue() == 0x11C5);
|
||||
164
src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <random>
|
||||
|
||||
#include "common/common_sizes.h"
|
||||
#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
|
||||
#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
|
||||
#include "core/hle/kernel/k_trace.h"
|
||||
|
||||
namespace Kernel::Board::Nintendo::Nx {
|
||||
|
||||
namespace impl {
|
||||
|
||||
constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2238 * 4 * 1024;
|
||||
constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x710 * 4 * 1024;
|
||||
constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4 * 1024;
|
||||
|
||||
} // namespace impl
|
||||
|
||||
constexpr const std::size_t RequiredNonSecureSystemMemorySize =
|
||||
impl::RequiredNonSecureSystemMemorySizeVi + impl::RequiredNonSecureSystemMemorySizeNvservices +
|
||||
impl::RequiredNonSecureSystemMemorySizeMisc;
|
||||
|
||||
namespace {
|
||||
|
||||
u32 GetMemoryModeForInit() {
|
||||
return 0x01;
|
||||
}
|
||||
|
||||
u32 GetMemorySizeForInit() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
Smc::MemoryArrangement GetMemoryArrangeForInit() {
|
||||
switch (GetMemoryModeForInit() & 0x3F) {
|
||||
case 0x01:
|
||||
default:
|
||||
return Smc::MemoryArrangement_4GB;
|
||||
case 0x02:
|
||||
return Smc::MemoryArrangement_4GBForAppletDev;
|
||||
case 0x03:
|
||||
return Smc::MemoryArrangement_4GBForSystemDev;
|
||||
case 0x11:
|
||||
return Smc::MemoryArrangement_6GB;
|
||||
case 0x12:
|
||||
return Smc::MemoryArrangement_6GBForAppletDev;
|
||||
case 0x21:
|
||||
return Smc::MemoryArrangement_8GB;
|
||||
}
|
||||
}
|
||||
} // namespace
|
||||
|
||||
// Initialization.
|
||||
size_t KSystemControl::Init::GetIntendedMemorySize() {
|
||||
switch (GetMemorySizeForInit()) {
|
||||
case Smc::MemorySize_4GB:
|
||||
default: // All invalid modes should go to 4GB.
|
||||
return Common::Size_4_GB;
|
||||
case Smc::MemorySize_6GB:
|
||||
return Common::Size_6_GB;
|
||||
case Smc::MemorySize_8GB:
|
||||
return Common::Size_8_GB;
|
||||
}
|
||||
}
|
||||
|
||||
PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) {
|
||||
return base_address;
|
||||
}
|
||||
|
||||
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
|
||||
return true;
|
||||
}
|
||||
|
||||
std::size_t KSystemControl::Init::GetApplicationPoolSize() {
|
||||
// Get the base pool size.
|
||||
const size_t base_pool_size = []() -> size_t {
|
||||
switch (GetMemoryArrangeForInit()) {
|
||||
case Smc::MemoryArrangement_4GB:
|
||||
default:
|
||||
return Common::Size_3285_MB;
|
||||
case Smc::MemoryArrangement_4GBForAppletDev:
|
||||
return Common::Size_2048_MB;
|
||||
case Smc::MemoryArrangement_4GBForSystemDev:
|
||||
return Common::Size_3285_MB;
|
||||
case Smc::MemoryArrangement_6GB:
|
||||
return Common::Size_4916_MB;
|
||||
case Smc::MemoryArrangement_6GBForAppletDev:
|
||||
return Common::Size_3285_MB;
|
||||
case Smc::MemoryArrangement_8GB:
|
||||
return Common::Size_4916_MB;
|
||||
}
|
||||
}();
|
||||
|
||||
// Return (possibly) adjusted size.
|
||||
return base_pool_size;
|
||||
}
|
||||
|
||||
size_t KSystemControl::Init::GetAppletPoolSize() {
|
||||
// Get the base pool size.
|
||||
const size_t base_pool_size = []() -> size_t {
|
||||
switch (GetMemoryArrangeForInit()) {
|
||||
case Smc::MemoryArrangement_4GB:
|
||||
default:
|
||||
return Common::Size_507_MB;
|
||||
case Smc::MemoryArrangement_4GBForAppletDev:
|
||||
return Common::Size_1554_MB;
|
||||
case Smc::MemoryArrangement_4GBForSystemDev:
|
||||
return Common::Size_448_MB;
|
||||
case Smc::MemoryArrangement_6GB:
|
||||
return Common::Size_562_MB;
|
||||
case Smc::MemoryArrangement_6GBForAppletDev:
|
||||
return Common::Size_2193_MB;
|
||||
case Smc::MemoryArrangement_8GB:
|
||||
return Common::Size_2193_MB;
|
||||
}
|
||||
}();
|
||||
|
||||
// Return (possibly) adjusted size.
|
||||
constexpr size_t ExtraSystemMemoryForAtmosphere = Common::Size_33_MB;
|
||||
return base_pool_size - ExtraSystemMemoryForAtmosphere - KTraceBufferSize;
|
||||
}
|
||||
|
||||
size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() {
|
||||
// Verify that our minimum is at least as large as Nintendo's.
|
||||
constexpr size_t MinimumSize = RequiredNonSecureSystemMemorySize;
|
||||
static_assert(MinimumSize >= 0x29C8000);
|
||||
|
||||
return MinimumSize;
|
||||
}
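As a quick check of the constants above: the three per-service requirements sum to 0x2238 + 0x710 + 0x80 = 0x29C8 pages of 4 KiB, i.e. exactly the 0x29C8000 bytes the static_assert in GetMinimumNonSecureSystemPoolSize compares against.

// Arithmetic check of RequiredNonSecureSystemMemorySize (pages are 4 KiB here):
static_assert((0x2238 + 0x710 + 0x80) * 4 * 1024 == 0x29C8000);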
|
||||
|
||||
namespace {
|
||||
template <typename F>
|
||||
u64 GenerateUniformRange(u64 min, u64 max, F f) {
|
||||
// Handle the case where the difference is too large to represent.
|
||||
if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
|
||||
return f();
|
||||
}
|
||||
|
||||
// Iterate until we get a value in range.
|
||||
const u64 range_size = ((max + 1) - min);
|
||||
const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
|
||||
while (true) {
|
||||
if (const u64 rnd = f(); rnd < effective_max) {
|
||||
return min + (rnd % range_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // Anonymous namespace
|
||||
|
||||
u64 KSystemControl::GenerateRandomU64() {
|
||||
static std::random_device device;
|
||||
static std::mt19937 gen(device());
|
||||
static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
|
||||
return distribution(gen);
|
||||
}
|
||||
|
||||
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
|
||||
return GenerateUniformRange(min, max, GenerateRandomU64);
|
||||
}
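GenerateUniformRange avoids modulo bias by rejection sampling: it only accepts raw 64-bit draws below effective_max, the largest whole multiple of the range size, and retries otherwise. A small usage sketch (the range bounds are made up for illustration):

// Usage sketch: pick a uniformly distributed slot index in [0, 1023].
const u64 slot = KSystemControl::GenerateRandomRange(0, 1023);
// Internally this is GenerateUniformRange(0, 1023, GenerateRandomU64): range_size = 1024,
// and draws falling in the top, incomplete multiple of 1024 are rejected, so every
// slot value is equally likely.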
|
||||
|
||||
} // namespace Kernel::Board::Nintendo::Nx
|
||||
28
src/core/hle/kernel/board/nintendo/nx/k_system_control.h
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel::Board::Nintendo::Nx {
|
||||
|
||||
class KSystemControl {
|
||||
public:
|
||||
class Init {
|
||||
public:
|
||||
// Initialization.
|
||||
static std::size_t GetIntendedMemorySize();
|
||||
static PAddr GetKernelPhysicalBaseAddress(u64 base_address);
|
||||
static bool ShouldIncreaseThreadResourceLimit();
|
||||
static std::size_t GetApplicationPoolSize();
|
||||
static std::size_t GetAppletPoolSize();
|
||||
static std::size_t GetMinimumNonSecureSystemPoolSize();
|
||||
};
|
||||
|
||||
static u64 GenerateRandomRange(u64 min, u64 max);
|
||||
static u64 GenerateRandomU64();
|
||||
};
|
||||
|
||||
} // namespace Kernel::Board::Nintendo::Nx
|
||||
26
src/core/hle/kernel/board/nintendo/nx/secure_monitor.h
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel::Board::Nintendo::Nx::Smc {
|
||||
|
||||
enum MemorySize {
|
||||
MemorySize_4GB = 0,
|
||||
MemorySize_6GB = 1,
|
||||
MemorySize_8GB = 2,
|
||||
};
|
||||
|
||||
enum MemoryArrangement {
|
||||
MemoryArrangement_4GB = 0,
|
||||
MemoryArrangement_4GBForAppletDev = 1,
|
||||
MemoryArrangement_4GBForSystemDev = 2,
|
||||
MemoryArrangement_6GB = 3,
|
||||
MemoryArrangement_6GBForAppletDev = 4,
|
||||
MemoryArrangement_8GB = 5,
|
||||
};
|
||||
|
||||
} // namespace Kernel::Board::Nintendo::Nx::Smc
|
||||
@@ -5,45 +5,34 @@
|
||||
#include <array>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_sizes.h"
|
||||
#include "core/hle/kernel/k_address_space_info.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace {
|
||||
|
||||
enum : u64 {
|
||||
Size_1_MB = 0x100000,
|
||||
Size_2_MB = 2 * Size_1_MB,
|
||||
Size_128_MB = 128 * Size_1_MB,
|
||||
Size_1_GB = 0x40000000,
|
||||
Size_2_GB = 2 * Size_1_GB,
|
||||
Size_4_GB = 4 * Size_1_GB,
|
||||
Size_6_GB = 6 * Size_1_GB,
|
||||
Size_64_GB = 64 * Size_1_GB,
|
||||
Size_512_GB = 512 * Size_1_GB,
|
||||
Invalid = std::numeric_limits<u64>::max(),
|
||||
};
|
||||
|
||||
// clang-format off
|
||||
constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{
|
||||
{ .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, },
|
||||
{ .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, },
|
||||
{ .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, },
|
||||
{ .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, },
|
||||
{ .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, },
|
||||
{ .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, },
|
||||
{ .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, },
|
||||
{ .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, },
|
||||
{ .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, },
|
||||
{ .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall },
|
||||
{ .bit_width = 39, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, },
|
||||
{ .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, },
|
||||
{ .bit_width = 39, .address = Invalid , .size = Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, },
|
||||
{ .bit_width = 32, .address = Common::Size_2_MB , .size = Common::Size_1_GB - Common::Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, },
|
||||
{ .bit_width = 32, .address = Common::Size_1_GB , .size = Common::Size_4_GB - Common::Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, },
|
||||
{ .bit_width = 32, .address = Common::Size_Invalid, .size = Common::Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, },
|
||||
{ .bit_width = 32, .address = Common::Size_Invalid, .size = Common::Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, },
|
||||
{ .bit_width = 36, .address = Common::Size_128_MB , .size = Common::Size_2_GB - Common::Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, },
|
||||
{ .bit_width = 36, .address = Common::Size_2_GB , .size = Common::Size_64_GB - Common::Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, },
|
||||
{ .bit_width = 36, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, },
|
||||
{ .bit_width = 36, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, },
|
||||
{ .bit_width = 39, .address = Common::Size_128_MB , .size = Common::Size_512_GB - Common::Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, },
|
||||
{ .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall },
|
||||
{ .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, },
|
||||
{ .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, },
|
||||
{ .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, },
|
||||
}};
|
||||
// clang-format on
|
||||
|
||||
constexpr bool IsAllowedIndexForAddress(std::size_t index) {
|
||||
return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid;
|
||||
return index < AddressSpaceInfos.size() &&
|
||||
AddressSpaceInfos[index].address != Common::Size_Invalid;
|
||||
}
|
||||
|
||||
using IndexArray =
|
||||
|
||||
199
src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
Normal file
@@ -0,0 +1,199 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "core/hle/kernel/k_memory_layout.h"
|
||||
#include "core/hle/kernel/k_memory_manager.h"
|
||||
#include "core/hle/kernel/k_system_control.h"
|
||||
#include "core/hle/kernel/k_trace.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace {
|
||||
|
||||
constexpr size_t CarveoutAlignment = 0x20000;
|
||||
constexpr size_t CarveoutSizeMax = (512ULL * 1024 * 1024) - CarveoutAlignment;
|
||||
|
||||
bool SetupPowerManagementControllerMemoryRegion(KMemoryLayout& memory_layout) {
|
||||
// Above firmware 2.0.0, the PMC is not mappable.
|
||||
return memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap) &&
|
||||
memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x7000E400, 0xC00,
|
||||
KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap);
|
||||
}
|
||||
|
||||
void InsertPoolPartitionRegionIntoBothTrees(KMemoryLayout& memory_layout, size_t start, size_t size,
|
||||
KMemoryRegionType phys_type,
|
||||
KMemoryRegionType virt_type, u32& cur_attr) {
|
||||
const u32 attr = cur_attr++;
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(start, size,
|
||||
static_cast<u32>(phys_type), attr));
|
||||
const KMemoryRegion* phys = memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
|
||||
static_cast<u32>(phys_type), attr);
|
||||
ASSERT(phys != nullptr);
|
||||
ASSERT(phys->GetEndAddress() != 0);
|
||||
ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size,
|
||||
static_cast<u32>(virt_type), attr));
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace Init {
|
||||
|
||||
void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {
|
||||
ASSERT(SetupPowerManagementControllerMemoryRegion(memory_layout));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x50041000, 0x1000,
|
||||
KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x50042000, 0x1000,
|
||||
KMemoryRegionType_InterruptCpuInterface | KMemoryRegionAttr_ShouldKernelMap));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
|
||||
|
||||
// Map IRAM unconditionally, to support debug-logging-to-iram build config.
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x40000000, 0x40000, KMemoryRegionType_LegacyLpsIram | KMemoryRegionAttr_ShouldKernelMap));
|
||||
|
||||
// Above firmware 2.0.0, prevent mapping the bpmp exception vectors or the ipatch region.
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
|
||||
}
|
||||
|
||||
void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
|
||||
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
|
||||
const PAddr physical_memory_base_address =
|
||||
KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
|
||||
|
||||
// Insert blocks into the tree.
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
|
||||
|
||||
// Insert the KTrace block at the end of Dram, if KTrace is enabled.
|
||||
static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
|
||||
if constexpr (IsKTraceEnabled) {
|
||||
const PAddr ktrace_buffer_phys_addr =
|
||||
physical_memory_base_address + intended_memory_size - KTraceBufferSize;
|
||||
ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
|
||||
ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
|
||||
}
|
||||
}
|
||||
|
||||
void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout) {
|
||||
// Start by identifying the extents of the DRAM memory region.
|
||||
const auto dram_extents = memory_layout.GetMainMemoryPhysicalExtents();
|
||||
ASSERT(dram_extents.GetEndAddress() != 0);
|
||||
|
||||
// Determine the end of the pool region.
|
||||
const u64 pool_end = dram_extents.GetEndAddress() - KTraceBufferSize;
|
||||
|
||||
// Find the start of the kernel DRAM region.
|
||||
const KMemoryRegion* kernel_dram_region =
|
||||
memory_layout.GetPhysicalMemoryRegionTree().FindFirstDerived(
|
||||
KMemoryRegionType_DramKernelBase);
|
||||
ASSERT(kernel_dram_region != nullptr);
|
||||
|
||||
const u64 kernel_dram_start = kernel_dram_region->GetAddress();
|
||||
ASSERT(Common::IsAligned(kernel_dram_start, CarveoutAlignment));
|
||||
|
||||
// Find the start of the pool partitions region.
|
||||
const KMemoryRegion* pool_partitions_region =
|
||||
memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
|
||||
KMemoryRegionType_DramPoolPartition, 0);
|
||||
ASSERT(pool_partitions_region != nullptr);
|
||||
const u64 pool_partitions_start = pool_partitions_region->GetAddress();
|
||||
|
||||
// Setup the pool partition layouts.
|
||||
// On 5.0.0+, setup modern 4-pool-partition layout.
|
||||
|
||||
// Get Application and Applet pool sizes.
|
||||
const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
|
||||
const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
|
||||
const size_t unsafe_system_pool_min_size =
|
||||
KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();
|
||||
|
||||
// Decide on starting addresses for our pools.
|
||||
const u64 application_pool_start = pool_end - application_pool_size;
|
||||
const u64 applet_pool_start = application_pool_start - applet_pool_size;
|
||||
const u64 unsafe_system_pool_start = std::min(
|
||||
kernel_dram_start + CarveoutSizeMax,
|
||||
Common::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
|
||||
const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
|
||||
|
||||
// We want to arrange application pool depending on where the middle of dram is.
|
||||
const u64 dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
|
||||
u32 cur_pool_attr = 0;
|
||||
size_t total_overhead_size = 0;
|
||||
if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
|
||||
InsertPoolPartitionRegionIntoBothTrees(
|
||||
memory_layout, application_pool_start, application_pool_size,
|
||||
KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
|
||||
cur_pool_attr);
|
||||
total_overhead_size +=
|
||||
KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
|
||||
} else {
|
||||
const size_t first_application_pool_size = dram_midpoint - application_pool_start;
|
||||
const size_t second_application_pool_size =
|
||||
application_pool_start + application_pool_size - dram_midpoint;
|
||||
InsertPoolPartitionRegionIntoBothTrees(
|
||||
memory_layout, application_pool_start, first_application_pool_size,
|
||||
KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
|
||||
cur_pool_attr);
|
||||
InsertPoolPartitionRegionIntoBothTrees(
|
||||
memory_layout, dram_midpoint, second_application_pool_size,
|
||||
KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
|
||||
cur_pool_attr);
|
||||
total_overhead_size +=
|
||||
KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
|
||||
total_overhead_size +=
|
||||
KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
|
||||
}
|
||||
|
||||
// Insert the applet pool.
|
||||
InsertPoolPartitionRegionIntoBothTrees(memory_layout, applet_pool_start, applet_pool_size,
|
||||
KMemoryRegionType_DramAppletPool,
|
||||
KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
|
||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
|
||||
|
||||
// Insert the nonsecure system pool.
|
||||
InsertPoolPartitionRegionIntoBothTrees(
|
||||
memory_layout, unsafe_system_pool_start, unsafe_system_pool_size,
|
||||
KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool,
|
||||
cur_pool_attr);
|
||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
|
||||
|
||||
// Insert the pool management region.
|
||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(
|
||||
(unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
|
||||
const u64 pool_management_start = unsafe_system_pool_start - total_overhead_size;
|
||||
const size_t pool_management_size = total_overhead_size;
|
||||
u32 pool_management_attr = 0;
|
||||
InsertPoolPartitionRegionIntoBothTrees(
|
||||
memory_layout, pool_management_start, pool_management_size,
|
||||
KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement,
|
||||
pool_management_attr);
|
||||
|
||||
// Insert the system pool.
|
||||
const u64 system_pool_size = pool_management_start - pool_partitions_start;
|
||||
InsertPoolPartitionRegionIntoBothTrees(memory_layout, pool_partitions_start, system_pool_size,
|
||||
KMemoryRegionType_DramSystemPool,
|
||||
KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
|
||||
}
|
||||
|
||||
} // namespace Init
|
||||
|
||||
} // namespace Kernel
|
||||
166
src/core/hle/kernel/k_memory_layout.cpp
Normal file
@@ -0,0 +1,166 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <array>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "core/hle/kernel/k_memory_layout.h"
|
||||
#include "core/hle/kernel/k_system_control.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace {
|
||||
|
||||
template <typename... Args>
|
||||
KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, Args&&... args) {
|
||||
return memory_region_allocator.Allocate(std::forward<Args>(args)...);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_)
|
||||
: memory_region_allocator{memory_region_allocator_} {}
|
||||
|
||||
void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
|
||||
this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id));
|
||||
}
|
||||
|
||||
bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
|
||||
// Locate the memory region that contains the address.
|
||||
KMemoryRegion* found = this->FindModifiable(address);
|
||||
|
||||
// We require that the old attr is correct.
|
||||
if (found->GetAttributes() != old_attr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// We further require that the region can be split from the old region.
|
||||
const u64 inserted_region_end = address + size;
|
||||
const u64 inserted_region_last = inserted_region_end - 1;
|
||||
if (found->GetLastAddress() < inserted_region_last) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Further, we require that the type id is a valid transformation.
|
||||
if (!found->CanDerive(type_id)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Cache information from the region before we remove it.
|
||||
const u64 old_address = found->GetAddress();
|
||||
const u64 old_last = found->GetLastAddress();
|
||||
const u64 old_pair = found->GetPairAddress();
|
||||
const u32 old_type = found->GetType();
|
||||
|
||||
// Erase the existing region from the tree.
|
||||
this->erase(this->iterator_to(*found));
|
||||
|
||||
// Insert the new region into the tree.
|
||||
if (old_address == address) {
|
||||
// Reuse the old object for the new region, if we can.
|
||||
found->Reset(address, inserted_region_last, old_pair, new_attr, type_id);
|
||||
this->insert(*found);
|
||||
} else {
|
||||
// If we can't re-use, adjust the old region.
|
||||
found->Reset(old_address, address - 1, old_pair, old_attr, old_type);
|
||||
this->insert(*found);
|
||||
|
||||
// Insert a new region for the split.
|
||||
const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
|
||||
? old_pair + (address - old_address)
|
||||
: old_pair;
|
||||
this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last,
|
||||
new_pair, new_attr, type_id));
|
||||
}
|
||||
|
||||
// If we need to insert a region after the region, do so.
|
||||
if (old_last != inserted_region_last) {
|
||||
const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
|
||||
? old_pair + (inserted_region_end - old_address)
|
||||
: old_pair;
|
||||
this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last,
|
||||
after_pair, old_attr, old_type));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
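Insert replaces the containing node with up to three nodes: an untouched prefix keeping the old attributes, the newly typed range, and an untouched suffix. An illustrative trace (addresses and the derived type are made up, not from the change):

// before: [0x80000000, 0x8FFFFFFF]   type = Dram
// Insert(0x80060000, 0x1000, KMemoryRegionType_DramKernelBase)
// after : [0x80000000, 0x8005FFFF]   Dram           (old node, Reset() to the prefix)
//         [0x80060000, 0x80060FFF]   DramKernelBase (newly allocated node)
//         [0x80061000, 0x8FFFFFFF]   Dram           (newly allocated node for the suffix)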
|
||||
|
||||
VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
|
||||
// We want to find the total extents of the type id.
|
||||
const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
|
||||
|
||||
// Ensure that our alignment is correct.
|
||||
ASSERT(Common::IsAligned(extents.GetAddress(), alignment));
|
||||
|
||||
const u64 first_address = extents.GetAddress();
|
||||
const u64 last_address = extents.GetLastAddress();
|
||||
|
||||
const u64 first_index = first_address / alignment;
|
||||
const u64 last_index = last_address / alignment;
|
||||
|
||||
while (true) {
|
||||
const u64 candidate =
|
||||
KSystemControl::GenerateRandomRange(first_index, last_index) * alignment;
|
||||
|
||||
// Ensure that the candidate doesn't overflow with the size.
|
||||
if (!(candidate < candidate + size)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const u64 candidate_last = candidate + size - 1;
|
||||
|
||||
// Ensure that the candidate fits within the region.
|
||||
if (candidate_last > last_address) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Locate the candidate region, and ensure it fits and has the correct type id.
|
||||
if (const auto& candidate_region = *this->Find(candidate);
|
||||
!(candidate_last <= candidate_region.GetLastAddress() &&
|
||||
candidate_region.GetType() == type_id)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
KMemoryLayout::KMemoryLayout()
|
||||
: virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator},
|
||||
virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {}
|
||||
|
||||
void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
|
||||
VAddr linear_virtual_start) {
|
||||
// Set static differences.
|
||||
linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
|
||||
linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
|
||||
|
||||
// Initialize linear trees.
|
||||
for (auto& region : GetPhysicalMemoryRegionTree()) {
|
||||
if (region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
|
||||
GetPhysicalLinearMemoryRegionTree().InsertDirectly(
|
||||
region.GetAddress(), region.GetLastAddress(), region.GetAttributes(),
|
||||
region.GetType());
|
||||
}
|
||||
}
|
||||
|
||||
for (auto& region : GetVirtualMemoryRegionTree()) {
|
||||
if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
|
||||
GetVirtualLinearMemoryRegionTree().InsertDirectly(
|
||||
region.GetAddress(), region.GetLastAddress(), region.GetAttributes(),
|
||||
region.GetType());
|
||||
}
|
||||
}
|
||||
}
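The two differences stored here are each other's negation modulo 2^64, so a physical address mapped to the linear virtual region round-trips exactly. A tiny standalone check, with the start addresses chosen purely for illustration:

// Illustrative values, not taken from the change; u64 arithmetic wraps exactly as in the real code.
constexpr unsigned long long phys_start = 0x80000000ULL;
constexpr unsigned long long virt_start = 0xFFFFFF8000000000ULL;
constexpr unsigned long long phys_to_virt = virt_start - phys_start; // linear_phys_to_virt_diff
constexpr unsigned long long virt_to_phys = phys_start - virt_start; // linear_virt_to_phys_diff
static_assert(0x80001000ULL + phys_to_virt == 0xFFFFFF8000001000ULL);
static_assert(0xFFFFFF8000001000ULL + virt_to_phys == 0x80001000ULL);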
|
||||
|
||||
size_t KMemoryLayout::GetResourceRegionSizeForInit() {
|
||||
// Calculate resource region size based on whether we allow extra threads.
|
||||
const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
|
||||
size_t resource_region_size =
|
||||
KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);
|
||||
|
||||
return resource_region_size;
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
@@ -1,23 +1,69 @@
|
||||
// Copyright 2020 yuzu Emulator Project
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/common_sizes.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/device_memory.h"
|
||||
#include "core/hle/kernel/k_memory_region.h"
|
||||
#include "core/hle/kernel/k_memory_region_type.h"
|
||||
#include "core/hle/kernel/memory_types.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
|
||||
constexpr std::size_t L1BlockSize = Common::Size_1_GB;
|
||||
constexpr std::size_t L2BlockSize = Common::Size_2_MB;
|
||||
|
||||
constexpr std::size_t GetMaximumOverheadSize(std::size_t size) {
|
||||
return (Common::DivideUp(size, L1BlockSize) + Common::DivideUp(size, L2BlockSize)) * PageSize;
|
||||
}
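Worked example for the bound: with size = MainMemorySizeMax = 8 GiB, DivideUp(8 GiB, 1 GiB) = 8 L1 blocks and DivideUp(8 GiB, 2 MiB) = 4096 L2 blocks, so KernelPageTableHeapSize below comes out to (8 + 4096) pages, roughly 16 MiB. This assumes PageSize is the usual 4 KiB kernel page; Common::Size_8_GB and GetMaximumOverheadSize are from the code above.

// Assumes PageSize == 0x1000.
static_assert(GetMaximumOverheadSize(Common::Size_8_GB) == (8 + 4096) * 0x1000);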
|
||||
|
||||
constexpr std::size_t MainMemorySize = Common::Size_4_GB;
|
||||
constexpr std::size_t MainMemorySizeMax = Common::Size_8_GB;
|
||||
|
||||
constexpr std::size_t ReservedEarlyDramSize = 0x60000;
|
||||
constexpr std::size_t DramPhysicalAddress = 0x80000000;
|
||||
|
||||
constexpr std::size_t KernelAslrAlignment = Common::Size_2_MB;
|
||||
constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
|
||||
constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
|
||||
|
||||
constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
|
||||
constexpr std::size_t KernelVirtualAddressSpaceEnd =
|
||||
KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
|
||||
constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
|
||||
constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ULL;
|
||||
constexpr std::size_t KernelVirtualAddressSpaceSize =
|
||||
KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
|
||||
constexpr std::size_t KernelVirtualAddressCodeBase = KernelVirtualAddressSpaceBase;
|
||||
constexpr std::size_t KernelVirtualAddressCodeSize = 0x62000;
|
||||
constexpr std::size_t KernelVirtualAddressCodeEnd =
|
||||
KernelVirtualAddressCodeBase + KernelVirtualAddressCodeSize;
|
||||
|
||||
constexpr std::size_t KernelPhysicalAddressSpaceBase = 0ULL;
|
||||
constexpr std::size_t KernelPhysicalAddressSpaceEnd =
|
||||
KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth;
|
||||
constexpr std::size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ULL;
|
||||
constexpr std::size_t KernelPhysicalAddressSpaceSize =
|
||||
KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;
|
||||
constexpr std::size_t KernelPhysicalAddressCodeBase = DramPhysicalAddress + ReservedEarlyDramSize;
|
||||
|
||||
constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemorySizeMax);
|
||||
constexpr std::size_t KernelInitialPageHeapSize = Common::Size_128_KB;
|
||||
|
||||
constexpr std::size_t KernelSlabHeapDataSize = Common::Size_5_MB;
|
||||
constexpr std::size_t KernelSlabHeapGapsSize = Common::Size_2_MB - Common::Size_64_KB;
|
||||
constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;
|
||||
|
||||
// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
|
||||
constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000ULL;
|
||||
|
||||
constexpr std::size_t KernelResourceSize =
|
||||
KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
|
||||
|
||||
constexpr bool IsKernelAddressKey(VAddr key) {
|
||||
return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
|
||||
@@ -27,64 +73,327 @@ constexpr bool IsKernelAddress(VAddr address) {
|
||||
return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
|
||||
}
|
||||
|
||||
class KMemoryRegion final {
|
||||
friend class KMemoryLayout;
|
||||
|
||||
public:
|
||||
constexpr PAddr StartAddress() const {
|
||||
return start_address;
|
||||
}
|
||||
|
||||
constexpr PAddr EndAddress() const {
|
||||
return end_address;
|
||||
}
|
||||
|
||||
private:
|
||||
constexpr KMemoryRegion() = default;
|
||||
constexpr KMemoryRegion(PAddr start_address, PAddr end_address)
|
||||
: start_address{start_address}, end_address{end_address} {}
|
||||
|
||||
const PAddr start_address{};
|
||||
const PAddr end_address{};
|
||||
};
|
||||
|
||||
class KMemoryLayout final {
|
||||
public:
|
||||
constexpr const KMemoryRegion& Application() const {
|
||||
return application;
|
||||
KMemoryLayout();
|
||||
|
||||
KMemoryRegionTree& GetVirtualMemoryRegionTree() {
|
||||
return virtual_tree;
|
||||
}
|
||||
const KMemoryRegionTree& GetVirtualMemoryRegionTree() const {
|
||||
return virtual_tree;
|
||||
}
|
||||
KMemoryRegionTree& GetPhysicalMemoryRegionTree() {
|
||||
return physical_tree;
|
||||
}
|
||||
const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const {
|
||||
return physical_tree;
|
||||
}
|
||||
KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() {
|
||||
return virtual_linear_tree;
|
||||
}
|
||||
const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const {
|
||||
return virtual_linear_tree;
|
||||
}
|
||||
KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() {
|
||||
return physical_linear_tree;
|
||||
}
|
||||
const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const {
|
||||
return physical_linear_tree;
|
||||
}
|
||||
|
||||
constexpr const KMemoryRegion& Applet() const {
|
||||
return applet;
|
||||
VAddr GetLinearVirtualAddress(PAddr address) const {
|
||||
return address + linear_phys_to_virt_diff;
|
||||
}
|
||||
PAddr GetLinearPhysicalAddress(VAddr address) const {
|
||||
return address + linear_virt_to_phys_diff;
|
||||
}
|
||||
|
||||
constexpr const KMemoryRegion& System() const {
|
||||
return system;
|
||||
const KMemoryRegion* FindVirtual(VAddr address) const {
|
||||
return Find(address, GetVirtualMemoryRegionTree());
|
||||
}
|
||||
const KMemoryRegion* FindPhysical(PAddr address) const {
|
||||
return Find(address, GetPhysicalMemoryRegionTree());
|
||||
}
|
||||
|
||||
static constexpr KMemoryLayout GetDefaultLayout() {
|
||||
constexpr std::size_t application_size{0xcd500000};
|
||||
constexpr std::size_t applet_size{0x1fb00000};
|
||||
constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
|
||||
constexpr PAddr application_end_address{Core::DramMemoryMap::End};
|
||||
constexpr PAddr applet_start_address{application_start_address - applet_size};
|
||||
constexpr PAddr applet_end_address{applet_start_address + applet_size};
|
||||
constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
|
||||
constexpr PAddr system_end_address{applet_start_address};
|
||||
return {application_start_address, application_end_address, applet_start_address,
|
||||
applet_end_address, system_start_address, system_end_address};
|
||||
const KMemoryRegion* FindVirtualLinear(VAddr address) const {
|
||||
return Find(address, GetVirtualLinearMemoryRegionTree());
|
||||
}
|
||||
const KMemoryRegion* FindPhysicalLinear(PAddr address) const {
|
||||
return Find(address, GetPhysicalLinearMemoryRegionTree());
|
||||
}
|
||||
|
||||
VAddr GetMainStackTopAddress(s32 core_id) const {
|
||||
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack);
|
||||
}
|
||||
VAddr GetIdleStackTopAddress(s32 core_id) const {
|
||||
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack);
|
||||
}
|
||||
VAddr GetExceptionStackTopAddress(s32 core_id) const {
|
||||
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
|
||||
}
|
||||
|
||||
VAddr GetSlabRegionAddress() const {
|
||||
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
|
||||
.GetAddress();
|
||||
}
|
||||
|
||||
const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
}
PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const {
return GetDeviceRegion(type).GetAddress();
}
VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const {
return GetDeviceRegion(type).GetPairAddress();
}

const KMemoryRegion& GetPoolManagementRegion() const {
return Dereference(
GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramPoolManagement));
}
const KMemoryRegion& GetPageTableHeapRegion() const {
return Dereference(
GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap));
}
const KMemoryRegion& GetKernelStackRegion() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack));
}
const KMemoryRegion& GetTempRegion() const {
return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp));
}

const KMemoryRegion& GetKernelTraceBufferRegion() const {
return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(
KMemoryRegionType_VirtualDramKernelTraceBuffer));
}

const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
return Dereference(FindVirtualLinear(address));
}

const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const {
return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer);
}
const KMemoryRegion* GetPhysicalOnMemoryBootImageRegion() const {
return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage);
}
const KMemoryRegion* GetPhysicalDTBRegion() const {
return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB);
}

bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
KMemoryRegionType_DramUserPool);
}
bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const {
return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(),
KMemoryRegionType_VirtualDramUserPool);
}

bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const {
return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
KMemoryRegionType_DramUserPool);
}
bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const {
return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(),
KMemoryRegionType_VirtualDramUserPool);
}

bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
}
bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address,
size_t size) const {
return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
}

std::pair<size_t, size_t> GetTotalAndKernelMemorySizes() const {
size_t total_size = 0, kernel_size = 0;
for (const auto& region : GetPhysicalMemoryRegionTree()) {
if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
total_size += region.GetSize();
if (!region.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
kernel_size += region.GetSize();
}
}
}
return std::make_pair(total_size, kernel_size);
}

void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
VAddr linear_virtual_start);
static size_t GetResourceRegionSizeForInit();

auto GetKernelRegionExtents() const {
return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel);
}
auto GetKernelCodeRegionExtents() const {
return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode);
}
auto GetKernelStackRegionExtents() const {
return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelStack);
}
auto GetKernelMiscRegionExtents() const {
return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelMisc);
}
auto GetKernelSlabRegionExtents() const {
return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelSlab);
}

auto GetLinearRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionAttr_LinearMapped);
}

auto GetLinearRegionVirtualExtents() const {
const auto physical = GetLinearRegionPhysicalExtents();
return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()),
GetLinearVirtualAddress(physical.GetLastAddress()), 0,
KMemoryRegionType_None);
}

auto GetMainMemoryPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Dram);
}
auto GetCarveoutRegionExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionAttr_CarveoutProtected);
}

auto GetKernelRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelBase);
}
auto GetKernelCodeRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelCode);
}
auto GetKernelSlabRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelSlab);
}
auto GetKernelPageTableHeapRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelPtHeap);
}
auto GetKernelInitPageTableRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramKernelInitPt);
}

auto GetKernelPoolManagementRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramPoolManagement);
}
auto GetKernelPoolPartitionRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramPoolPartition);
}
auto GetKernelSystemPoolRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramSystemPool);
}
auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramSystemNonSecurePool);
}
auto GetKernelAppletPoolRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramAppletPool);
}
auto GetKernelApplicationPoolRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_DramApplicationPool);
}

auto GetKernelTraceBufferRegionPhysicalExtents() const {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionType_KernelTraceBuffer);
}

private:
constexpr KMemoryLayout(PAddr application_start_address, std::size_t application_size,
PAddr applet_start_address, std::size_t applet_size,
PAddr system_start_address, std::size_t system_size)
: application{application_start_address, application_size},
applet{applet_start_address, applet_size}, system{system_start_address, system_size} {}
template <typename AddressType>
static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address,
const KMemoryRegionTree& tree, KMemoryRegionType type) {
// Check if the cached region already contains the address.
if (region != nullptr && region->Contains(address)) {
return true;
}

const KMemoryRegion application;
const KMemoryRegion applet;
const KMemoryRegion system;
// Find the containing region, and update the cache.
if (const KMemoryRegion* found = tree.Find(address);
found != nullptr && found->IsDerivedFrom(type)) {
region = found;
return true;
} else {
return false;
}
}

template <typename AddressType>
static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size,
const KMemoryRegionTree& tree, KMemoryRegionType type) {
// Get the end of the checked region.
const u64 last_address = address + size - 1;

// Walk the tree to verify the region is correct.
const KMemoryRegion* cur =
(region != nullptr && region->Contains(address)) ? region : tree.Find(address);
while (cur != nullptr && cur->IsDerivedFrom(type)) {
if (last_address <= cur->GetLastAddress()) {
region = cur;
return true;
}

cur = cur->GetNext();
}
return false;
}

template <typename AddressType>
static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) {
return tree.Find(address);
}

static KMemoryRegion& Dereference(KMemoryRegion* region) {
ASSERT(region != nullptr);
return *region;
}

static const KMemoryRegion& Dereference(const KMemoryRegion* region) {
ASSERT(region != nullptr);
return *region;
}

VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
const auto& region = Dereference(
GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id)));
ASSERT(region.GetEndAddress() != 0);
return region.GetEndAddress();
}

private:
u64 linear_phys_to_virt_diff{};
u64 linear_virt_to_phys_diff{};
KMemoryRegionAllocator memory_region_allocator;
KMemoryRegionTree virtual_tree;
KMemoryRegionTree physical_tree;
KMemoryRegionTree virtual_linear_tree;
KMemoryRegionTree physical_linear_tree;
};

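As a rough illustration of the cached-region pattern above (the IsHeap*/IsLinearMapped* helpers take the region pointer by reference so a hit on the same region skips the tree walk), here is a minimal usage sketch. The function and variable names are assumptions for illustration only, not taken from the change itself:

    // Hypothetical caller: checks a run of pages, reusing the cached region so pages that
    // fall inside the same KMemoryRegion avoid a second tree lookup.
    bool AllPagesAreHeap(const Kernel::KMemoryLayout& layout, VAddr addr, std::size_t num_pages) {
        const Kernel::KMemoryRegion* cached_region = nullptr;
        for (std::size_t i = 0; i < num_pages; ++i) {
            const VAddr page_addr = addr + i * 0x1000; // assumes the usual 4 KiB page size
            if (!layout.IsHeapVirtualAddress(cached_region, page_addr)) {
                return false;
            }
        }
        return true;
    }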
namespace Init {

// These should be generic, regardless of board.
void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout);

// These may be implemented in a board-specific manner.
void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout);
void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout);

} // namespace Init

} // namespace Kernel

@@ -173,4 +173,16 @@ ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_page
return RESULT_SUCCESS;
}

std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) {
const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
const std::size_t optimize_map_size =
(Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
Common::BitSize<u64>()) *
sizeof(u64);
const std::size_t manager_meta_size =
Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
return manager_meta_size + page_heap_size;
}

} // namespace Kernel

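A worked example of the overhead formula above, assuming the usual 4 KiB PageSize (this is editorial arithmetic, not part of the change). For a 512 MiB managed region, i.e. 131072 pages:

    // ref_count_size    = 131072 * sizeof(u16)                     = 256 KiB
    // optimize_map_size = (AlignUp(131072, 64) / 64) * sizeof(u64) =  16 KiB
    // manager_meta_size = AlignUp(256 KiB + 16 KiB, PageSize)      = 272 KiB
    // total overhead    = 272 KiB + KPageHeap::CalculateManagementOverheadSize(512 MiB)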
@@ -29,6 +29,10 @@ public:

Shift = 4,
Mask = (0xF << Shift),

// Aliases.
Unsafe = Application,
Secure = System,
};

enum class Direction : u32 {
@@ -56,6 +60,10 @@ public:
static constexpr std::size_t MaxManagerCount = 10;

public:
static std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
return Impl::CalculateManagementOverheadSize(region_size);
}

static constexpr u32 EncodeOption(Pool pool, Direction dir) {
return (static_cast<u32>(pool) << static_cast<u32>(Pool::Shift)) |
(static_cast<u32>(dir) << static_cast<u32>(Direction::Shift));
@@ -85,6 +93,16 @@ private:
KPageHeap heap;
Pool pool{};

public:
static std::size_t CalculateManagementOverheadSize(std::size_t region_size);

static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
std::size_t region_size) {
return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
Common::BitSize<u64>()) *
sizeof(u64);
}

public:
Impl() = default;

350
src/core/hle/kernel/k_memory_region.h
Normal file
@@ -0,0 +1,350 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/intrusive_red_black_tree.h"
|
||||
#include "core/hle/kernel/k_memory_region_type.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KMemoryRegionAllocator;
|
||||
|
||||
class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryRegion>,
|
||||
NonCopyable {
|
||||
friend class KMemoryRegionTree;
|
||||
|
||||
public:
|
||||
constexpr KMemoryRegion() = default;
|
||||
constexpr KMemoryRegion(u64 address_, u64 last_address_)
|
||||
: address{address_}, last_address{last_address_} {}
|
||||
constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_,
|
||||
u32 type_id_)
|
||||
: address(address_), last_address(last_address_), pair_address(pair_address_),
|
||||
attributes(attributes_), type_id(type_id_) {}
|
||||
constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_)
|
||||
: KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_,
|
||||
type_id_) {}
|
||||
|
||||
static constexpr int Compare(const KMemoryRegion& lhs, const KMemoryRegion& rhs) {
|
||||
if (lhs.GetAddress() < rhs.GetAddress()) {
|
||||
return -1;
|
||||
} else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
|
||||
return 0;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
|
||||
address = a;
|
||||
pair_address = p;
|
||||
last_address = la;
|
||||
attributes = r;
|
||||
type_id = t;
|
||||
}
|
||||
|
||||
public:
|
||||
constexpr u64 GetAddress() const {
|
||||
return address;
|
||||
}
|
||||
|
||||
constexpr u64 GetPairAddress() const {
|
||||
return pair_address;
|
||||
}
|
||||
|
||||
constexpr u64 GetLastAddress() const {
|
||||
return last_address;
|
||||
}
|
||||
|
||||
constexpr u64 GetEndAddress() const {
|
||||
return this->GetLastAddress() + 1;
|
||||
}
|
||||
|
||||
constexpr size_t GetSize() const {
|
||||
return this->GetEndAddress() - this->GetAddress();
|
||||
}
|
||||
|
||||
constexpr u32 GetAttributes() const {
|
||||
return attributes;
|
||||
}
|
||||
|
||||
constexpr u32 GetType() const {
|
||||
return type_id;
|
||||
}
|
||||
|
||||
constexpr void SetType(u32 type) {
|
||||
ASSERT(this->CanDerive(type));
|
||||
type_id = type;
|
||||
}
|
||||
|
||||
constexpr bool Contains(u64 address) const {
|
||||
ASSERT(this->GetEndAddress() != 0);
|
||||
return this->GetAddress() <= address && address <= this->GetLastAddress();
|
||||
}
|
||||
|
||||
constexpr bool IsDerivedFrom(u32 type) const {
|
||||
return (this->GetType() | type) == this->GetType();
|
||||
}
|
||||
|
||||
constexpr bool HasTypeAttribute(u32 attr) const {
|
||||
return (this->GetType() | attr) == this->GetType();
|
||||
}
|
||||
|
||||
constexpr bool CanDerive(u32 type) const {
|
||||
return (this->GetType() | type) == type;
|
||||
}
|
||||
|
||||
constexpr void SetPairAddress(u64 a) {
|
||||
pair_address = a;
|
||||
}
|
||||
|
||||
constexpr void SetTypeAttribute(u32 attr) {
|
||||
type_id |= attr;
|
||||
}
|
||||
|
||||
private:
|
||||
u64 address{};
|
||||
u64 last_address{};
|
||||
u64 pair_address{};
|
||||
u32 attributes{};
|
||||
u32 type_id{};
|
||||
};
|
||||
|
||||
class KMemoryRegionTree final : NonCopyable {
|
||||
public:
|
||||
struct DerivedRegionExtents {
|
||||
const KMemoryRegion* first_region{};
|
||||
const KMemoryRegion* last_region{};
|
||||
|
||||
constexpr DerivedRegionExtents() = default;
|
||||
|
||||
constexpr u64 GetAddress() const {
|
||||
return this->first_region->GetAddress();
|
||||
}
|
||||
|
||||
constexpr u64 GetLastAddress() const {
|
||||
return this->last_region->GetLastAddress();
|
||||
}
|
||||
|
||||
constexpr u64 GetEndAddress() const {
|
||||
return this->GetLastAddress() + 1;
|
||||
}
|
||||
|
||||
constexpr size_t GetSize() const {
|
||||
return this->GetEndAddress() - this->GetAddress();
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
using TreeType =
|
||||
Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
|
||||
|
||||
public:
|
||||
using value_type = TreeType::value_type;
|
||||
using size_type = TreeType::size_type;
|
||||
using difference_type = TreeType::difference_type;
|
||||
using pointer = TreeType::pointer;
|
||||
using const_pointer = TreeType::const_pointer;
|
||||
using reference = TreeType::reference;
|
||||
using const_reference = TreeType::const_reference;
|
||||
using iterator = TreeType::iterator;
|
||||
using const_iterator = TreeType::const_iterator;
|
||||
|
||||
private:
|
||||
TreeType m_tree{};
|
||||
KMemoryRegionAllocator& memory_region_allocator;
|
||||
|
||||
public:
|
||||
explicit KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_);
|
||||
|
||||
public:
|
||||
KMemoryRegion* FindModifiable(u64 address) {
|
||||
if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) {
|
||||
return std::addressof(*it);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
const KMemoryRegion* Find(u64 address) const {
|
||||
if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->cend()) {
|
||||
return std::addressof(*it);
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
const KMemoryRegion* FindByType(KMemoryRegionType type_id) const {
|
||||
for (auto it = this->cbegin(); it != this->cend(); ++it) {
|
||||
if (it->GetType() == static_cast<u32>(type_id)) {
|
||||
return std::addressof(*it);
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const KMemoryRegion* FindByTypeAndAttribute(u32 type_id, u32 attr) const {
|
||||
for (auto it = this->cbegin(); it != this->cend(); ++it) {
|
||||
if (it->GetType() == type_id && it->GetAttributes() == attr) {
|
||||
return std::addressof(*it);
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const KMemoryRegion* FindFirstDerived(KMemoryRegionType type_id) const {
|
||||
for (auto it = this->cbegin(); it != this->cend(); it++) {
|
||||
if (it->IsDerivedFrom(type_id)) {
|
||||
return std::addressof(*it);
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const KMemoryRegion* FindLastDerived(KMemoryRegionType type_id) const {
|
||||
const KMemoryRegion* region = nullptr;
|
||||
for (auto it = this->begin(); it != this->end(); it++) {
|
||||
if (it->IsDerivedFrom(type_id)) {
|
||||
region = std::addressof(*it);
|
||||
}
|
||||
}
|
||||
return region;
|
||||
}
|
||||
|
||||
DerivedRegionExtents GetDerivedRegionExtents(KMemoryRegionType type_id) const {
|
||||
DerivedRegionExtents extents;
|
||||
|
||||
ASSERT(extents.first_region == nullptr);
|
||||
ASSERT(extents.last_region == nullptr);
|
||||
|
||||
for (auto it = this->cbegin(); it != this->cend(); it++) {
|
||||
if (it->IsDerivedFrom(type_id)) {
|
||||
if (extents.first_region == nullptr) {
|
||||
extents.first_region = std::addressof(*it);
|
||||
}
|
||||
extents.last_region = std::addressof(*it);
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(extents.first_region != nullptr);
|
||||
ASSERT(extents.last_region != nullptr);
|
||||
|
||||
return extents;
|
||||
}
|
||||
|
||||
DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const {
|
||||
return GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
|
||||
}
|
||||
|
||||
public:
|
||||
void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0);
|
||||
bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
|
||||
|
||||
VAddr GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
|
||||
|
||||
VAddr GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id,
|
||||
size_t guard_size) {
|
||||
return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
|
||||
}
|
||||
|
||||
public:
|
||||
// Iterator accessors.
|
||||
iterator begin() {
|
||||
return m_tree.begin();
|
||||
}
|
||||
|
||||
const_iterator begin() const {
|
||||
return m_tree.begin();
|
||||
}
|
||||
|
||||
iterator end() {
|
||||
return m_tree.end();
|
||||
}
|
||||
|
||||
const_iterator end() const {
|
||||
return m_tree.end();
|
||||
}
|
||||
|
||||
const_iterator cbegin() const {
|
||||
return this->begin();
|
||||
}
|
||||
|
||||
const_iterator cend() const {
|
||||
return this->end();
|
||||
}
|
||||
|
||||
iterator iterator_to(reference ref) {
|
||||
return m_tree.iterator_to(ref);
|
||||
}
|
||||
|
||||
const_iterator iterator_to(const_reference ref) const {
|
||||
return m_tree.iterator_to(ref);
|
||||
}
|
||||
|
||||
// Content management.
|
||||
bool empty() const {
|
||||
return m_tree.empty();
|
||||
}
|
||||
|
||||
reference back() {
|
||||
return m_tree.back();
|
||||
}
|
||||
|
||||
const_reference back() const {
|
||||
return m_tree.back();
|
||||
}
|
||||
|
||||
reference front() {
|
||||
return m_tree.front();
|
||||
}
|
||||
|
||||
const_reference front() const {
|
||||
return m_tree.front();
|
||||
}
|
||||
|
||||
iterator insert(reference ref) {
|
||||
return m_tree.insert(ref);
|
||||
}
|
||||
|
||||
iterator erase(iterator it) {
|
||||
return m_tree.erase(it);
|
||||
}
|
||||
|
||||
iterator find(const_reference ref) const {
|
||||
return m_tree.find(ref);
|
||||
}
|
||||
|
||||
iterator nfind(const_reference ref) const {
|
||||
return m_tree.nfind(ref);
|
||||
}
|
||||
};
|
||||
|
||||
class KMemoryRegionAllocator final : NonCopyable {
|
||||
public:
|
||||
static constexpr size_t MaxMemoryRegions = 200;
|
||||
|
||||
constexpr KMemoryRegionAllocator() = default;
|
||||
|
||||
template <typename... Args>
|
||||
KMemoryRegion* Allocate(Args&&... args) {
|
||||
// Ensure we stay within the bounds of our heap.
|
||||
ASSERT(this->num_regions < MaxMemoryRegions);
|
||||
|
||||
// Create the new region.
|
||||
KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]);
|
||||
new (region) KMemoryRegion(std::forward<Args>(args)...);
|
||||
|
||||
return region;
|
||||
}
|
||||
|
||||
private:
|
||||
std::array<KMemoryRegion, MaxMemoryRegions> region_heap{};
|
||||
size_t num_regions{};
|
||||
};
|
||||
|
||||
} // namespace Kernel
|
||||
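The IsDerivedFrom(), HasTypeAttribute() and CanDerive() helpers in the file above all reduce to the same superset-of-bits test. A standalone sketch of that test follows; the constants are illustrative placeholders, not the exact region type values:

    #include <cstdint>

    // A type "derives from" a base when it carries every bit of the base.
    constexpr bool IsDerivedFromExample(std::uint32_t type, std::uint32_t base) {
        return (type | base) == type;
    }
    static_assert(IsDerivedFromExample(0x1A6, 0x26));  // a pool-like type derives from its heap base
    static_assert(!IsDerivedFromExample(0x26, 0x1A6)); // but the base does not derive from the pool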
338
src/core/hle/kernel/k_memory_region_type.h
Normal file
@@ -0,0 +1,338 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/bit_util.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
#define ARCH_ARM64
|
||||
#define BOARD_NINTENDO_NX
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
enum KMemoryRegionType : u32 {
|
||||
KMemoryRegionAttr_CarveoutProtected = 0x04000000,
|
||||
KMemoryRegionAttr_DidKernelMap = 0x08000000,
|
||||
KMemoryRegionAttr_ShouldKernelMap = 0x10000000,
|
||||
KMemoryRegionAttr_UserReadOnly = 0x20000000,
|
||||
KMemoryRegionAttr_NoUserMap = 0x40000000,
|
||||
KMemoryRegionAttr_LinearMapped = 0x80000000,
|
||||
};
|
||||
DECLARE_ENUM_FLAG_OPERATORS(KMemoryRegionType);
|
||||
|
||||
namespace impl {
|
||||
|
||||
constexpr size_t BitsForDeriveSparse(size_t n) {
|
||||
return n + 1;
|
||||
}
|
||||
|
||||
constexpr size_t BitsForDeriveDense(size_t n) {
|
||||
size_t low = 0, high = 1;
|
||||
for (size_t i = 0; i < n - 1; ++i) {
|
||||
if ((++low) == high) {
|
||||
++high;
|
||||
low = 0;
|
||||
}
|
||||
}
|
||||
return high + 1;
|
||||
}
|
||||
|
||||
class KMemoryRegionTypeValue {
|
||||
public:
|
||||
using ValueType = std::underlying_type_t<KMemoryRegionType>;
|
||||
|
||||
constexpr KMemoryRegionTypeValue() = default;
|
||||
|
||||
constexpr operator KMemoryRegionType() const {
|
||||
return static_cast<KMemoryRegionType>(m_value);
|
||||
}
|
||||
|
||||
constexpr ValueType GetValue() const {
|
||||
return m_value;
|
||||
}
|
||||
|
||||
constexpr const KMemoryRegionTypeValue& Finalize() {
|
||||
m_finalized = true;
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr const KMemoryRegionTypeValue& SetSparseOnly() {
|
||||
m_sparse_only = true;
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr const KMemoryRegionTypeValue& SetDenseOnly() {
|
||||
m_dense_only = true;
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr KMemoryRegionTypeValue& SetAttribute(u32 attr) {
|
||||
m_value |= attr;
|
||||
return *this;
|
||||
}
|
||||
|
||||
constexpr KMemoryRegionTypeValue DeriveInitial(
|
||||
size_t i, size_t next = Common::BitSize<ValueType>()) const {
|
||||
KMemoryRegionTypeValue new_type = *this;
|
||||
new_type.m_value = (ValueType{1} << i);
|
||||
new_type.m_next_bit = next;
|
||||
return new_type;
|
||||
}
|
||||
|
||||
constexpr KMemoryRegionTypeValue DeriveAttribute(u32 attr) const {
|
||||
KMemoryRegionTypeValue new_type = *this;
|
||||
new_type.m_value |= attr;
|
||||
return new_type;
|
||||
}
|
||||
|
||||
constexpr KMemoryRegionTypeValue DeriveTransition(size_t ofs = 0, size_t adv = 1) const {
|
||||
KMemoryRegionTypeValue new_type = *this;
|
||||
new_type.m_value |= (ValueType{1} << (m_next_bit + ofs));
|
||||
new_type.m_next_bit += adv;
|
||||
return new_type;
|
||||
}
|
||||
|
||||
constexpr KMemoryRegionTypeValue DeriveSparse(size_t ofs, size_t n, size_t i) const {
|
||||
KMemoryRegionTypeValue new_type = *this;
|
||||
new_type.m_value |= (ValueType{1} << (m_next_bit + ofs));
|
||||
new_type.m_value |= (ValueType{1} << (m_next_bit + ofs + 1 + i));
|
||||
new_type.m_next_bit += ofs + n + 1;
|
||||
return new_type;
|
||||
}
|
||||
|
||||
constexpr KMemoryRegionTypeValue Derive(size_t n, size_t i) const {
|
||||
size_t low = 0, high = 1;
|
||||
for (size_t j = 0; j < i; ++j) {
|
||||
if ((++low) == high) {
|
||||
++high;
|
||||
low = 0;
|
||||
}
|
||||
}
|
||||
|
||||
KMemoryRegionTypeValue new_type = *this;
|
||||
new_type.m_value |= (ValueType{1} << (m_next_bit + low));
|
||||
new_type.m_value |= (ValueType{1} << (m_next_bit + high));
|
||||
new_type.m_next_bit += BitsForDeriveDense(n);
|
||||
return new_type;
|
||||
}
|
||||
|
||||
constexpr KMemoryRegionTypeValue Advance(size_t n) const {
|
||||
KMemoryRegionTypeValue new_type = *this;
|
||||
new_type.m_next_bit += n;
|
||||
return new_type;
|
||||
}
|
||||
|
||||
constexpr bool IsAncestorOf(ValueType v) const {
|
||||
return (m_value | v) == v;
|
||||
}
|
||||
|
||||
private:
|
||||
constexpr KMemoryRegionTypeValue(ValueType v) : m_value(v) {}
|
||||
|
||||
private:
|
||||
ValueType m_value{};
|
||||
size_t m_next_bit{};
|
||||
bool m_finalized{};
|
||||
bool m_sparse_only{};
|
||||
bool m_dense_only{};
|
||||
};
|
||||
|
||||
} // namespace impl
|
||||
|
||||
constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
|
||||
constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
|
||||
constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
|
||||
static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1);
|
||||
static_assert(KMemoryRegionType_Dram.GetValue() == 0x2);
|
||||
|
||||
constexpr auto KMemoryRegionType_DramKernelBase =
|
||||
KMemoryRegionType_Dram.DeriveSparse(0, 3, 0)
|
||||
.SetAttribute(KMemoryRegionAttr_NoUserMap)
|
||||
.SetAttribute(KMemoryRegionAttr_CarveoutProtected);
|
||||
constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
|
||||
constexpr auto KMemoryRegionType_DramHeapBase =
|
||||
KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped);
|
||||
static_assert(KMemoryRegionType_DramKernelBase.GetValue() ==
|
||||
(0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16));
|
||||
static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped));
|
||||
|
||||
constexpr auto KMemoryRegionType_DramKernelCode =
|
||||
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0);
|
||||
constexpr auto KMemoryRegionType_DramKernelSlab =
|
||||
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1);
|
||||
constexpr auto KMemoryRegionType_DramKernelPtHeap =
|
||||
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute(
|
||||
KMemoryRegionAttr_LinearMapped);
|
||||
constexpr auto KMemoryRegionType_DramKernelInitPt =
|
||||
KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute(
|
||||
KMemoryRegionAttr_LinearMapped);
|
||||
static_assert(KMemoryRegionType_DramKernelCode.GetValue() ==
|
||||
(0xCE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_DramKernelSlab.GetValue() ==
|
||||
(0x14E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_DramKernelPtHeap.GetValue() ==
|
||||
(0x24E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
|
||||
KMemoryRegionAttr_LinearMapped));
|
||||
static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
|
||||
(0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
|
||||
KMemoryRegionAttr_LinearMapped));
|
||||
|
||||
constexpr auto KMemoryRegionType_DramReservedEarly =
|
||||
KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
|
||||
static_assert(KMemoryRegionType_DramReservedEarly.GetValue() ==
|
||||
(0x16 | KMemoryRegionAttr_NoUserMap));
|
||||
|
||||
constexpr auto KMemoryRegionType_KernelTraceBuffer =
|
||||
KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0)
|
||||
.SetAttribute(KMemoryRegionAttr_LinearMapped)
|
||||
.SetAttribute(KMemoryRegionAttr_UserReadOnly);
|
||||
constexpr auto KMemoryRegionType_OnMemoryBootImage =
|
||||
KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1);
|
||||
constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
|
||||
static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() ==
|
||||
(0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly));
|
||||
static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156);
|
||||
static_assert(KMemoryRegionType_DTB.GetValue() == 0x256);
|
||||
|
||||
constexpr auto KMemoryRegionType_DramPoolPartition =
|
||||
KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
|
||||
static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
|
||||
(0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
|
||||
|
||||
constexpr auto KMemoryRegionType_DramPoolManagement =
|
||||
KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
|
||||
KMemoryRegionAttr_CarveoutProtected);
|
||||
constexpr auto KMemoryRegionType_DramUserPool =
|
||||
KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
|
||||
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
|
||||
(0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
|
||||
KMemoryRegionAttr_CarveoutProtected));
|
||||
static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
|
||||
(0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
|
||||
|
||||
constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0);
|
||||
constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1);
|
||||
constexpr auto KMemoryRegionType_DramSystemNonSecurePool =
|
||||
KMemoryRegionType_DramUserPool.Derive(4, 2);
|
||||
constexpr auto KMemoryRegionType_DramSystemPool =
|
||||
KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
|
||||
static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
|
||||
(0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
|
||||
(0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
|
||||
(0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
|
||||
static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
|
||||
(0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
|
||||
KMemoryRegionAttr_CarveoutProtected));
|
||||
|
||||
constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
|
||||
constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap =
|
||||
KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
|
||||
constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
|
||||
KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
|
||||
static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
|
||||
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
|
||||
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
|
||||
|
||||
constexpr auto KMemoryRegionType_VirtualDramKernelInitPt =
|
||||
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
|
||||
constexpr auto KMemoryRegionType_VirtualDramPoolManagement =
|
||||
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
|
||||
constexpr auto KMemoryRegionType_VirtualDramUserPool =
|
||||
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
|
||||
static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
|
||||
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
|
||||
static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
|
||||
|
||||
// NOTE: For unknown reason, the pools are derived out-of-order here. It's worth eventually trying
|
||||
// to understand why Nintendo made this choice.
|
||||
// UNUSED: .Derive(6, 0);
|
||||
// UNUSED: .Derive(6, 1);
|
||||
constexpr auto KMemoryRegionType_VirtualDramAppletPool =
|
||||
KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
|
||||
constexpr auto KMemoryRegionType_VirtualDramApplicationPool =
|
||||
KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
|
||||
constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
|
||||
KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
|
||||
constexpr auto KMemoryRegionType_VirtualDramSystemPool =
|
||||
KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
|
||||
static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
|
||||
static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
|
||||
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
|
||||
static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
|
||||
|
||||
constexpr auto KMemoryRegionType_ArchDeviceBase =
|
||||
KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
|
||||
constexpr auto KMemoryRegionType_BoardDeviceBase =
|
||||
KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();
|
||||
static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5);
|
||||
static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);
|
||||
|
||||
#if defined(ARCH_ARM64)
|
||||
#include "core/hle/kernel/arch/arm64/k_memory_region_device_types.inc"
|
||||
#elif defined(ARCH_ARM)
|
||||
#error "Unimplemented"
|
||||
#else
|
||||
// Default to no architecture devices.
|
||||
constexpr auto NumArchitectureDeviceRegions = 0;
|
||||
#endif
|
||||
static_assert(NumArchitectureDeviceRegions >= 0);
|
||||
|
||||
#if defined(BOARD_NINTENDO_NX)
|
||||
#include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc"
|
||||
#else
|
||||
// Default to no board devices.
|
||||
constexpr auto NumBoardDeviceRegions = 0;
|
||||
#endif
|
||||
static_assert(NumBoardDeviceRegions >= 0);
|
||||
|
||||
constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
|
||||
constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
|
||||
constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
|
||||
constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
|
||||
static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19);
|
||||
static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29);
|
||||
static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49);
|
||||
static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89);
|
||||
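For reference, the asserted constants fall out of the DeriveSparse() arithmetic shown earlier in this file: KMemoryRegionType_Kernel has value 0x1 with its next free bit at position 2, and DeriveSparse(1, 4, i) sets a marker bit at position 3 plus a selector bit at position 4 + i (this note is editorial, not part of the change):

    // KernelCode  (i = 0): 0x1 | (1 << 3) | (1 << 4) = 0x19
    // KernelStack (i = 1): 0x1 | (1 << 3) | (1 << 5) = 0x29
    // KernelMisc  (i = 2): 0x1 | (1 << 3) | (1 << 6) = 0x49
    // KernelSlab  (i = 3): 0x1 | (1 << 3) | (1 << 7) = 0x89
    static_assert((0x1u | (1u << 3) | (1u << 4)) == 0x19u);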
|
||||
constexpr auto KMemoryRegionType_KernelMiscDerivedBase =
|
||||
KMemoryRegionType_KernelMisc.DeriveTransition();
|
||||
static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149);
|
||||
|
||||
// UNUSED: .Derive(7, 0);
|
||||
constexpr auto KMemoryRegionType_KernelMiscMainStack =
|
||||
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1);
|
||||
constexpr auto KMemoryRegionType_KernelMiscMappedDevice =
|
||||
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2);
|
||||
constexpr auto KMemoryRegionType_KernelMiscExceptionStack =
|
||||
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3);
|
||||
constexpr auto KMemoryRegionType_KernelMiscUnknownDebug =
|
||||
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4);
|
||||
// UNUSED: .Derive(7, 5);
|
||||
constexpr auto KMemoryRegionType_KernelMiscIdleStack =
|
||||
KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);
|
||||
static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49);
|
||||
static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49);
|
||||
static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349);
|
||||
static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549);
|
||||
static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349);
|
||||
|
||||
constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
|
||||
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
|
||||
|
||||
constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
|
||||
if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
|
||||
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
|
||||
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
|
||||
return KMemoryRegionType_VirtualDramKernelPtHeap;
|
||||
} else {
|
||||
return KMemoryRegionType_Dram;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
@@ -2,21 +2,16 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/core_timing_util.h"
|
||||
#include "core/hle/kernel/k_resource_limit.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
|
||||
namespace Kernel {
|
||||
constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
|
||||
|
||||
KResourceLimit::KResourceLimit(KernelCore& kernel, Core::System& system)
|
||||
: Object{kernel}, lock{kernel}, cond_var{kernel}, kernel{kernel}, system(system) {}
|
||||
KResourceLimit::KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_)
|
||||
: Object{kernel}, lock{kernel}, cond_var{kernel}, core_timing(core_timing_) {}
|
||||
KResourceLimit::~KResourceLimit() = default;
|
||||
|
||||
s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
|
||||
@@ -83,7 +78,7 @@ ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
|
||||
}
|
||||
|
||||
bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
|
||||
return Reserve(which, value, system.CoreTiming().GetGlobalTimeNs().count() + DefaultTimeout);
|
||||
return Reserve(which, value, core_timing.GetGlobalTimeNs().count() + DefaultTimeout);
|
||||
}
|
||||
|
||||
bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
|
||||
@@ -114,7 +109,7 @@ bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
|
||||
}
|
||||
|
||||
if (current_hints[index] + value <= limit_values[index] &&
|
||||
(timeout < 0 || system.CoreTiming().GetGlobalTimeNs().count() < timeout)) {
|
||||
(timeout < 0 || core_timing.GetGlobalTimeNs().count() < timeout)) {
|
||||
waiter_count++;
|
||||
cond_var.Wait(&lock, timeout);
|
||||
waiter_count--;
|
||||
|
||||
@@ -2,9 +2,6 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
@@ -15,8 +12,8 @@
|
||||
|
||||
union ResultCode;
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
namespace Core::Timing {
|
||||
class CoreTiming;
|
||||
}
|
||||
|
||||
namespace Kernel {
|
||||
@@ -37,7 +34,7 @@ constexpr bool IsValidResourceType(LimitableResource type) {
|
||||
|
||||
class KResourceLimit final : public Object {
|
||||
public:
|
||||
explicit KResourceLimit(KernelCore& kernel, Core::System& system);
|
||||
explicit KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_);
|
||||
~KResourceLimit();
|
||||
|
||||
s64 GetLimitValue(LimitableResource which) const;
|
||||
@@ -75,7 +72,6 @@ private:
|
||||
mutable KLightLock lock;
|
||||
s32 waiter_count{};
|
||||
KLightConditionVariable cond_var;
|
||||
KernelCore& kernel;
|
||||
Core::System& system;
|
||||
const Core::Timing::CoreTiming& core_timing;
|
||||
};
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -62,7 +62,7 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
|
||||
}
|
||||
|
||||
u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
|
||||
std::scoped_lock lock{guard};
|
||||
KScopedSpinLock lk{guard};
|
||||
if (KThread* prev_highest_thread = state.highest_priority_thread;
|
||||
prev_highest_thread != highest_thread) {
|
||||
if (prev_highest_thread != nullptr) {
|
||||
@@ -637,11 +637,11 @@ void KScheduler::RescheduleCurrentCore() {
|
||||
if (phys_core.IsInterrupted()) {
|
||||
phys_core.ClearInterrupt();
|
||||
}
|
||||
guard.lock();
|
||||
guard.Lock();
|
||||
if (state.needs_scheduling.load()) {
|
||||
Schedule();
|
||||
} else {
|
||||
guard.unlock();
|
||||
guard.Unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -669,7 +669,7 @@ void KScheduler::Unload(KThread* thread) {
|
||||
} else {
|
||||
prev_thread = nullptr;
|
||||
}
|
||||
thread->context_guard.unlock();
|
||||
thread->context_guard.Unlock();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -713,7 +713,7 @@ void KScheduler::ScheduleImpl() {
|
||||
|
||||
// If we're not actually switching thread, there's nothing to do.
|
||||
if (next_thread == current_thread.load()) {
|
||||
guard.unlock();
|
||||
guard.Unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -732,9 +732,9 @@ void KScheduler::ScheduleImpl() {
|
||||
} else {
|
||||
old_context = &idle_thread->GetHostContext();
|
||||
}
|
||||
guard.unlock();
|
||||
guard.Unlock();
|
||||
|
||||
Common::Fiber::YieldTo(*old_context, switch_fiber);
|
||||
Common::Fiber::YieldTo(*old_context, *switch_fiber);
|
||||
/// When a thread wakes up, the scheduler may have changed to other in another core.
|
||||
auto& next_scheduler = *system.Kernel().CurrentScheduler();
|
||||
next_scheduler.SwitchContextStep2();
|
||||
@@ -748,34 +748,29 @@ void KScheduler::OnSwitch(void* this_scheduler) {
|
||||
void KScheduler::SwitchToCurrent() {
|
||||
while (true) {
|
||||
{
|
||||
std::scoped_lock lock{guard};
|
||||
KScopedSpinLock lk{guard};
|
||||
current_thread.store(state.highest_priority_thread);
|
||||
state.needs_scheduling.store(false);
|
||||
}
|
||||
const auto is_switch_pending = [this] {
|
||||
std::scoped_lock lock{guard};
|
||||
KScopedSpinLock lk{guard};
|
||||
return state.needs_scheduling.load();
|
||||
};
|
||||
do {
|
||||
auto next_thread = current_thread.load();
|
||||
if (next_thread != nullptr) {
|
||||
next_thread->context_guard.lock();
|
||||
next_thread->context_guard.Lock();
|
||||
if (next_thread->GetRawState() != ThreadState::Runnable) {
|
||||
next_thread->context_guard.unlock();
|
||||
next_thread->context_guard.Unlock();
|
||||
break;
|
||||
}
|
||||
if (next_thread->GetActiveCore() != core_id) {
|
||||
next_thread->context_guard.unlock();
|
||||
next_thread->context_guard.Unlock();
|
||||
break;
|
||||
}
|
||||
}
|
||||
std::shared_ptr<Common::Fiber>* next_context;
|
||||
if (next_thread != nullptr) {
|
||||
next_context = &next_thread->GetHostContext();
|
||||
} else {
|
||||
next_context = &idle_thread->GetHostContext();
|
||||
}
|
||||
Common::Fiber::YieldTo(switch_fiber, *next_context);
|
||||
auto thread = next_thread ? next_thread : idle_thread;
|
||||
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
|
||||
} while (!is_switch_pending());
|
||||
}
|
||||
}
|
||||
@@ -800,9 +795,9 @@ void KScheduler::Initialize() {
|
||||
std::string name = "Idle Thread Id:" + std::to_string(core_id);
|
||||
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
|
||||
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
|
||||
auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
|
||||
KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
|
||||
nullptr, std::move(init_func), init_func_parameter);
|
||||
auto thread_res = KThread::CreateThread(
|
||||
system, ThreadType::Main, name, 0, KThread::IdleThreadPriority, 0,
|
||||
static_cast<u32>(core_id), 0, nullptr, std::move(init_func), init_func_parameter);
|
||||
idle_thread = thread_res.Unwrap().get();
|
||||
}
|
||||
|
||||
|
||||
@@ -2,19 +2,16 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/spin_lock.h"
|
||||
#include "core/hle/kernel/global_scheduler_context.h"
|
||||
#include "core/hle/kernel/k_priority_queue.h"
|
||||
#include "core/hle/kernel/k_scheduler_lock.h"
|
||||
#include "core/hle/kernel/k_scoped_lock.h"
|
||||
#include "core/hle/kernel/k_spin_lock.h"
|
||||
|
||||
namespace Common {
|
||||
class Fiber;
|
||||
@@ -195,12 +192,12 @@ private:
|
||||
u64 last_context_switch_time{};
|
||||
const s32 core_id;
|
||||
|
||||
Common::SpinLock guard{};
|
||||
KSpinLock guard{};
|
||||
};
|
||||
|
||||
class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
|
||||
class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
|
||||
public:
|
||||
explicit KScopedSchedulerLock(KernelCore& kernel);
|
||||
explicit KScopedSchedulerLock(KernelCore & kernel);
|
||||
~KScopedSchedulerLock();
|
||||
};
|
||||
|
||||
|
||||
@@ -2,14 +2,11 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/spin_lock.h"
|
||||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/k_spin_lock.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
|
||||
@@ -34,7 +31,7 @@ public:
|
||||
} else {
|
||||
// Otherwise, we want to disable scheduling and acquire the spinlock.
|
||||
SchedulerType::DisableScheduling(kernel);
|
||||
spin_lock.lock();
|
||||
spin_lock.Lock();
|
||||
|
||||
// For debug, ensure that our state is valid.
|
||||
ASSERT(lock_count == 0);
|
||||
@@ -58,7 +55,7 @@ public:
|
||||
|
||||
// Note that we no longer hold the lock, and unlock the spinlock.
|
||||
owner_thread = nullptr;
|
||||
spin_lock.unlock();
|
||||
spin_lock.Unlock();
|
||||
|
||||
// Enable scheduling, and perform a rescheduling operation.
|
||||
SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
|
||||
@@ -67,7 +64,7 @@ public:
|
||||
|
||||
private:
|
||||
KernelCore& kernel;
|
||||
Common::SpinLock spin_lock{};
|
||||
KAlignedSpinLock spin_lock{};
|
||||
s32 lock_count{};
|
||||
KThread* owner_thread{};
|
||||
};
|
||||
|
||||
@@ -20,19 +20,22 @@ concept KLockable = !std::is_reference_v<T> && requires(T & t) {
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
requires KLockable<T> class KScopedLock {
|
||||
requires KLockable<T> class [[nodiscard]] KScopedLock {
|
||||
public:
|
||||
explicit KScopedLock(T* l) : lock_ptr(l) {
|
||||
explicit KScopedLock(T * l) : lock_ptr(l) {
|
||||
this->lock_ptr->Lock();
|
||||
}
|
||||
explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */
|
||||
}
|
||||
explicit KScopedLock(T & l) : KScopedLock(std::addressof(l)) {}
|
||||
|
||||
~KScopedLock() {
|
||||
this->lock_ptr->Unlock();
|
||||
}
|
||||
|
||||
KScopedLock(const KScopedLock&) = delete;
|
||||
KScopedLock(KScopedLock&&) = delete;
|
||||
KScopedLock& operator=(const KScopedLock&) = delete;
|
||||
|
||||
KScopedLock(KScopedLock &&) = delete;
|
||||
KScopedLock& operator=(KScopedLock&&) = delete;
|
||||
|
||||
private:
|
||||
T* lock_ptr;
|
||||
|
||||
@@ -15,9 +15,9 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KScopedSchedulerLockAndSleep {
|
||||
class [[nodiscard]] KScopedSchedulerLockAndSleep {
|
||||
public:
|
||||
explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
|
||||
explicit KScopedSchedulerLockAndSleep(KernelCore & kernel, KThread * t, s64 timeout)
|
||||
: kernel(kernel), thread(t), timeout_tick(timeout) {
|
||||
// Lock the scheduler.
|
||||
kernel.GlobalSchedulerContext().scheduler_lock.Lock();
|
||||
|
||||
@@ -28,6 +28,12 @@ private:
|
||||
std::atomic_flag lck = ATOMIC_FLAG_INIT;
|
||||
};
|
||||
|
||||
// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
|
||||
using KAlignedSpinLock = KSpinLock;
|
||||
using KNotAlignedSpinLock = KSpinLock;
|
||||
|
||||
using KScopedSpinLock = KScopedLock<KSpinLock>;
|
||||
using KScopedAlignedSpinLock = KScopedLock<KAlignedSpinLock>;
|
||||
using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>;
|
||||
|
||||
} // namespace Kernel
|
||||
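The scheduler hunks above replace Common::SpinLock and std::scoped_lock with KSpinLock and the KScopedSpinLock alias, which use the capitalized Lock()/Unlock() names through the KScopedLock RAII wrapper. A minimal usage sketch, with the function name chosen for illustration only:

    #include "core/hle/kernel/k_spin_lock.h"

    void WithGuard(Kernel::KSpinLock& guard) {
        // KScopedSpinLock is KScopedLock<KSpinLock>: Lock() on construction, Unlock() at scope exit.
        Kernel::KScopedSpinLock lk{guard};
        // ... critical section ...
    }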
|
||||
@@ -1,42 +0,0 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <random>
|
||||
|
||||
#include "core/hle/kernel/k_system_control.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace {
|
||||
template <typename F>
|
||||
u64 GenerateUniformRange(u64 min, u64 max, F f) {
|
||||
// Handle the case where the difference is too large to represent.
|
||||
if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
|
||||
return f();
|
||||
}
|
||||
|
||||
// Iterate until we get a value in range.
|
||||
const u64 range_size = ((max + 1) - min);
|
||||
const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
|
||||
while (true) {
|
||||
if (const u64 rnd = f(); rnd < effective_max) {
|
||||
return min + (rnd % range_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // Anonymous namespace
|
||||
|
||||
u64 KSystemControl::GenerateRandomU64() {
|
||||
static std::random_device device;
|
||||
static std::mt19937 gen(device());
|
||||
static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
|
||||
return distribution(gen);
|
||||
}
|
||||
|
||||
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
|
||||
return GenerateUniformRange(min, max, GenerateRandomU64);
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
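A short worked note on the rejection loop in GenerateUniformRange() above (editorial, not part of the change): rejecting raw samples at or above effective_max avoids modulo bias. With range_size = 3 and a hypothetical 4-bit generator (16 outcomes), effective_max = (16 / 3) * 3 = 15, so only the single leftover value 15 is re-rolled; taking rnd % 3 directly would spread 16 outcomes over the 3 buckets unevenly (6/5/5).

    // 4-bit generator example:
    static_assert(((16u / 3u) * 3u) == 15u); // effective_max; only the raw value 15 is rejected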
@@ -6,14 +6,18 @@
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
#define BOARD_NINTENDO_NX
|
||||
|
||||
#ifdef BOARD_NINTENDO_NX
|
||||
|
||||
#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KSystemControl {
|
||||
public:
|
||||
KSystemControl() = default;
|
||||
|
||||
static u64 GenerateRandomRange(u64 min, u64 max);
|
||||
static u64 GenerateRandomU64();
|
||||
};
|
||||
using Kernel::Board::Nintendo::Nx::KSystemControl;
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
#else
|
||||
#error "Unknown board for KSystemControl"
|
||||
#endif
|
||||
|
||||
@@ -995,22 +995,11 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
|
||||
return host_context;
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process) {
|
||||
std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
|
||||
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
|
||||
return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
|
||||
owner_process, std::move(init_func), init_func_parameter);
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process,
|
||||
std::function<void(void*)>&& thread_start_func,
|
||||
void* thread_start_parameter) {
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(Core::System& system,
|
||||
ThreadType type_flags, std::string name,
|
||||
VAddr entry_point, u32 priority, u64 arg,
|
||||
s32 processor_id, VAddr stack_top,
|
||||
Process* owner_process) {
|
||||
auto& kernel = system.Kernel();
|
||||
|
||||
std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
|
||||
@@ -1027,12 +1016,35 @@ ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, Thread
|
||||
auto& scheduler = kernel.GlobalSchedulerContext();
|
||||
scheduler.AddThread(thread);
|
||||
|
||||
thread->host_context =
|
||||
std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
|
||||
|
||||
return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(
|
||||
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
|
||||
u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
|
||||
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter) {
|
||||
auto thread_result = CreateThread(system, type_flags, name, entry_point, priority, arg,
|
||||
processor_id, stack_top, owner_process);
|
||||
|
||||
if (thread_result.Succeeded()) {
|
||||
(*thread_result)->host_context =
|
||||
std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
|
||||
}
|
||||
|
||||
return thread_result;
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::CreateUserThread(
|
||||
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
|
||||
u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process) {
|
||||
std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
|
||||
|
||||
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
|
||||
|
||||
return CreateThread(system, type_flags, name, entry_point, priority, arg, processor_id,
|
||||
stack_top, owner_process, std::move(init_func), init_func_parameter);
|
||||
}
|
||||
|
||||
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
|
||||
return kernel.GetCurrentEmuThread();
|
||||
}
|
||||
|
@@ -14,10 +14,10 @@

#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_common.h"
@@ -116,7 +116,7 @@ public:
    using WaiterList = boost::intrusive::list<KThread>;

    /**
     * Creates and returns a new thread. The new thread is immediately scheduled
     * Creates and returns a new thread.
     * @param system The instance of the whole system
     * @param name The friendly name desired for the thread
     * @param entry_point The address at which the thread should start execution
@@ -127,12 +127,12 @@ public:
     * @param owner_process The parent process for the thread, if null, it's a kernel thread
     * @return A shared pointer to the newly created thread
     */
    [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
    [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
        Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
        u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);

    /**
     * Creates and returns a new thread. The new thread is immediately scheduled
     * Creates and returns a new thread, with a specified entry point.
     * @param system The instance of the whole system
     * @param name The friendly name desired for the thread
     * @param entry_point The address at which the thread should start execution
@@ -145,11 +145,27 @@ public:
     * @param thread_start_parameter The parameter which will be passed to host context on init
     * @return A shared pointer to the newly created thread
     */
    [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
    [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
        Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
        u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
        std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);

    /**
     * Creates and returns a new thread for the emulated "user" process.
     * @param system The instance of the whole system
     * @param name The friendly name desired for the thread
     * @param entry_point The address at which the thread should start execution
     * @param priority The thread's priority
     * @param arg User data to pass to the thread
     * @param processor_id The ID(s) of the processors on which the thread is desired to be run
     * @param stack_top The address of the thread's stack top
     * @param owner_process The parent process for the thread, if null, it's a kernel thread
     * @return A shared pointer to the newly created thread
     */
    [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateUserThread(
        Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
        u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);

    [[nodiscard]] std::string GetName() const override {
        return name;
    }
@@ -386,7 +402,7 @@ public:
        return wait_cancelled;
    }

    [[nodiscard]] void ClearWaitCancelled() {
    void ClearWaitCancelled() {
        wait_cancelled = false;
    }

@@ -716,7 +732,7 @@ private:
    s8 priority_inheritance_count{};
    bool resource_limit_release_hint{};
    StackParameters stack_parameters{};
    Common::SpinLock context_guard{};
    KSpinLock context_guard{};

    // For emulation
    std::shared_ptr<Common::Fiber> host_context{};

12  src/core/hle/kernel/k_trace.h  Normal file
@@ -0,0 +1,12 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

namespace Kernel {

constexpr bool IsKTraceEnabled = false;
constexpr std::size_t KTraceBufferSize = IsKTraceEnabled ? 16 * 1024 * 1024 : 0;

} // namespace Kernel

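The new header only exposes two compile-time constants. A hedged sketch of how a consumer could gate its trace storage on them (the g_trace_buffer and MaybeTrace names are illustrative, not part of this change):

// Sketch: size trace storage at compile time from the k_trace.h constants.
// u8 comes from common/common_types.h.
#include <array>
#include "common/common_types.h"
#include "core/hle/kernel/k_trace.h"

std::array<u8, Kernel::KTraceBufferSize> g_trace_buffer{}; // zero-sized while IsKTraceEnabled is false

void MaybeTrace() {
    if constexpr (Kernel::IsKTraceEnabled) {
        // Tracing code is only instantiated in builds where the flag is flipped on.
    }
}
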
@@ -1,4 +1,4 @@
// Copyright 2014 Citra Emulator Project
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

@@ -12,6 +12,7 @@
#include <utility>

#include "common/assert.h"
#include "common/common_sizes.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread.h"
@@ -66,11 +67,16 @@ struct KernelCore::Impl {
        is_phantom_mode_for_singlecore = false;

        InitializePhysicalCores();
        InitializeSystemResourceLimit(kernel, system);
        InitializeMemoryLayout();
        InitializePreemption(kernel);

        // Derive the initial memory layout from the emulated board
        KMemoryLayout memory_layout;
        DeriveInitialMemoryLayout(memory_layout);
        InitializeMemoryLayout(memory_layout);
        InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
        InitializeSlabHeaps();
        InitializeSchedulers();
        InitializeSuspendThreads();
        InitializePreemption(kernel);
    }

    void InitializeCores() {

@@ -101,8 +107,6 @@ struct KernelCore::Impl {

        current_process = nullptr;

        system_resource_limit = nullptr;

        global_handle_table.Clear();

        preemption_event = nullptr;
@@ -111,6 +115,13 @@ struct KernelCore::Impl {

        exclusive_monitor.reset();

        hid_shared_mem = nullptr;
        font_shared_mem = nullptr;
        irs_shared_mem = nullptr;
        time_shared_mem = nullptr;

        system_resource_limit = nullptr;

        // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
        next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
    }
@@ -131,27 +142,33 @@ struct KernelCore::Impl {
    }

    // Creates the default system resource limit
    void InitializeSystemResourceLimit(KernelCore& kernel, Core::System& system) {
        system_resource_limit = std::make_shared<KResourceLimit>(kernel, system);
    void InitializeSystemResourceLimit(KernelCore& kernel,
                                       const Core::Timing::CoreTiming& core_timing,
                                       const KMemoryLayout& memory_layout) {
        system_resource_limit = std::make_shared<KResourceLimit>(kernel, core_timing);
        const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();

        // If setting the default system values fails, then something seriously wrong has occurred.
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, 0x100000000)
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
                   .IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 700).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200)
                   .IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 933).IsSuccess());
        ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess());
        system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size);

        // Derived from recent software updates. The kernel reserves 27MB
        constexpr u64 kernel_size{0x1b00000};
        if (!system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size)) {
            UNREACHABLE();
        }
        // Reserve secure applet memory, introduced in firmware 5.0.0
        constexpr u64 secure_applet_memory_size{0x400000};
        constexpr u64 secure_applet_memory_size{Common::Size_4_MB};
        ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
                                              secure_applet_memory_size));

        // This memory seems to be reserved on hardware, but is not reserved/used by yuzu.
        // Likely Horizon OS reserved memory
        // TODO(ameerj): Derive the memory rather than hardcode it.
        constexpr u64 unknown_reserved_memory{0x2f896000};
        ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
                                              unknown_reserved_memory));
    }

    void InitializePreemption(KernelCore& kernel) {
@@ -176,9 +193,9 @@ struct KernelCore::Impl {
            std::string name = "Suspend Thread Id:" + std::to_string(i);
            std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
            void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
            auto thread_res = KThread::Create(system, ThreadType::HighPriority, std::move(name), 0,
                                              0, 0, static_cast<u32>(i), 0, nullptr,
                                              std::move(init_func), init_func_parameter);
            auto thread_res = KThread::CreateThread(
                system, ThreadType::HighPriority, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
                nullptr, std::move(init_func), init_func_parameter);

            suspend_threads[i] = std::move(thread_res).Unwrap();
        }
@@ -216,10 +233,9 @@ struct KernelCore::Impl {
    // Gets the dummy KThread for the caller, allocating a new one if this is the first time
    KThread* GetHostDummyThread() {
        const thread_local auto thread =
            KThread::Create(
            KThread::CreateThread(
                system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0,
                KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr,
                []([[maybe_unused]] void* arg) { UNREACHABLE(); }, nullptr)
                KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr)
                .Unwrap();
        return thread.get();
    }

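The InitializeSystemResourceLimit rewrite above follows a set-then-reserve pattern: SetLimitValue fixes each ceiling from the derived memory layout, and every boot-time consumer then reserves against it. A small sketch of that pattern with made-up sizes (kernel and system come from the surrounding KernelCore::Impl context):

// Sketch with illustrative values only.
auto limit = std::make_shared<KResourceLimit>(kernel, system.CoreTiming());
ASSERT(limit->SetLimitValue(LimitableResource::PhysicalMemory, 4ULL * 1024 * 1024 * 1024)
           .IsSuccess());
if (!limit->Reserve(LimitableResource::PhysicalMemory, 27ULL * 1024 * 1024)) {
    UNREACHABLE(); // mirrors how the kernel treats a failed boot-time reservation
}
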
@@ -264,51 +280,319 @@ struct KernelCore::Impl {
        return schedulers[thread_id]->GetCurrentThread();
    }

    void InitializeMemoryLayout() {
        // Initialize memory layout
        constexpr KMemoryLayout layout{KMemoryLayout::GetDefaultLayout()};
    void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) {
        // Insert the root region for the virtual memory tree, from which all other regions will
        // derive.
        memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
            KernelVirtualAddressSpaceBase,
            KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);

        // Insert the root region for the physical memory tree, from which all other regions will
        // derive.
        memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
            KernelPhysicalAddressSpaceBase,
            KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);

        // Save start and end for ease of use.
        const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase;
        const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd;

        // Setup the containing kernel region.
        constexpr size_t KernelRegionSize = Common::Size_1_GB;
        constexpr size_t KernelRegionAlign = Common::Size_1_GB;
        constexpr VAddr kernel_region_start =
            Common::AlignDown(code_start_virt_addr, KernelRegionAlign);
        size_t kernel_region_size = KernelRegionSize;
        if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
            kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
        }
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));

        // Setup the code region.
        constexpr size_t CodeRegionAlign = PageSize;
        constexpr VAddr code_region_start =
            Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
        constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
        constexpr size_t code_region_size = code_region_end - code_region_start;
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            code_region_start, code_region_size, KMemoryRegionType_KernelCode));

        // Setup board-specific device physical regions.
        Init::SetupDevicePhysicalMemoryRegions(memory_layout);

        // Determine the amount of space needed for the misc region.
        size_t misc_region_needed_size;
        {
            // Each core has a one page stack for all three stack types (Main, Idle, Exception).
            misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));

            // Account for each auto-map device.
            for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
                if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
                    // Check that the region is valid.
                    ASSERT(region.GetEndAddress() != 0);

                    // Account for the region.
                    misc_region_needed_size +=
                        PageSize + (Common::AlignUp(region.GetLastAddress(), PageSize) -
                                    Common::AlignDown(region.GetAddress(), PageSize));
                }
            }

            // Multiply the needed size by three, to account for the need for guard space.
            misc_region_needed_size *= 3;
        }

        // Decide on the actual size for the misc region.
        constexpr size_t MiscRegionAlign = KernelAslrAlignment;
        constexpr size_t MiscRegionMinimumSize = Common::Size_32_MB;
        const size_t misc_region_size = Common::AlignUp(
            std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign);
        ASSERT(misc_region_size > 0);

        // Setup the misc region.
        const VAddr misc_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));

        // Setup the stack region.
        constexpr size_t StackRegionSize = Common::Size_14_MB;
        constexpr size_t StackRegionAlign = KernelAslrAlignment;
        const VAddr stack_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));

        // Determine the size of the resource region.
        const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();

        // Determine the size of the slab region.
        const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize);
        ASSERT(slab_region_size <= resource_region_size);

        // Setup the slab region.
        const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase;
        const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size;
        const PAddr slab_start_phys_addr = code_end_phys_addr;
        const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
        constexpr size_t SlabRegionAlign = KernelAslrAlignment;
        const size_t slab_region_needed_size =
            Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
            Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
        const VAddr slab_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
            (code_end_phys_addr % SlabRegionAlign);
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));

        // Setup the temp region.
        constexpr size_t TempRegionSize = Common::Size_128_MB;
        constexpr size_t TempRegionAlign = KernelAslrAlignment;
        const VAddr temp_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
                                                                 KMemoryRegionType_KernelTemp));

        // Automatically map in devices that have auto-map attributes.
        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
            // We only care about kernel regions.
            if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
                continue;
            }

            // Check whether we should map the region.
            if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
                continue;
            }

            // If this region has already been mapped, no need to consider it.
            if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
                continue;
            }

            // Check that the region is valid.
            ASSERT(region.GetEndAddress() != 0);

            // Set the attribute to note we've mapped this region.
            region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);

            // Create a virtual pair region and insert it into the tree.
            const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
            const size_t map_size =
                Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
            const VAddr map_virt_addr =
                memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                    map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
            ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
                map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
            region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
        }

        Init::SetupDramPhysicalMemoryRegions(memory_layout);

        // Insert a physical region for the kernel code region.
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));

        // Insert a physical region for the kernel slab region.
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));

        // Determine size available for kernel page table heaps, requiring > 8 MB.
        const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
        const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
        ASSERT(page_table_heap_size / Common::Size_4_MB > 2);

        // Insert a physical region for the kernel page table heap region
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));

        // All DRAM regions that we haven't tagged by this point will be mapped under the linear
        // mapping. Tag them.
        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
            if (region.GetType() == KMemoryRegionType_Dram) {
                // Check that the region is valid.
                ASSERT(region.GetEndAddress() != 0);

                // Set the linear map attribute.
                region.SetTypeAttribute(KMemoryRegionAttr_LinearMapped);
            }
        }

        // Get the linear region extents.
        const auto linear_extents =
            memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
                KMemoryRegionAttr_LinearMapped);
        ASSERT(linear_extents.GetEndAddress() != 0);

        // Setup the linear mapping region.
        constexpr size_t LinearRegionAlign = Common::Size_1_GB;
        const PAddr aligned_linear_phys_start =
            Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
        const size_t linear_region_size =
            Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
            aligned_linear_phys_start;
        const VAddr linear_region_start =
            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);

        const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;

        // Map and create regions for all the linearly-mapped data.
        {
            PAddr cur_phys_addr = 0;
            u64 cur_size = 0;
            for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
                if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
                    continue;
                }

                ASSERT(region.GetEndAddress() != 0);

                if (cur_size == 0) {
                    cur_phys_addr = region.GetAddress();
                    cur_size = region.GetSize();
                } else if (cur_phys_addr + cur_size == region.GetAddress()) {
                    cur_size += region.GetSize();
                } else {
                    cur_phys_addr = region.GetAddress();
                    cur_size = region.GetSize();
                }

                const VAddr region_virt_addr =
                    region.GetAddress() + linear_region_phys_to_virt_diff;
                ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
                    region_virt_addr, region.GetSize(),
                    GetTypeForVirtualLinearMapping(region.GetType())));
                region.SetPairAddress(region_virt_addr);

                KMemoryRegion* virt_region =
                    memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
                ASSERT(virt_region != nullptr);
                virt_region->SetPairAddress(region.GetAddress());
            }
        }

        // Insert regions for the initial page table region.
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
            resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
            KMemoryRegionType_VirtualDramKernelInitPt));

        // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
        // some pool partition. Tag them.
        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
            if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
                region.SetType(KMemoryRegionType_DramPoolPartition);
            }
        }

        // Setup all other memory regions needed to arrange the pool partitions.
        Init::SetupPoolPartitionMemoryRegions(memory_layout);

        // Cache all linear regions in their own trees for faster access, later.
        memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
                                                        linear_region_start);
    }

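DeriveInitialMemoryLayout repeatedly pads physical ranges out to page granularity before inserting the virtual pair region. A worked example of that align-down/align-up arithmetic with illustrative addresses (u64 as in common/common_types.h; the numbers are not values from any emulated board):

// Sketch mirroring the map_phys_addr / map_size computation above.
constexpr u64 PageSizeExample = 0x1000;
constexpr u64 region_addr = 0x7001'2345;
constexpr u64 region_end = 0x7001'F000;
constexpr u64 map_phys_addr = region_addr & ~(PageSizeExample - 1); // AlignDown -> 0x7001'2000
constexpr u64 map_size =
    ((region_end + PageSizeExample - 1) & ~(PageSizeExample - 1)) - map_phys_addr; // AlignUp
static_assert(map_size == 0xD000, "padded size covers the whole range in whole pages");
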
    void InitializeMemoryLayout(const KMemoryLayout& memory_layout) {
        const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
        const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents();
        const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();

        // Initialize memory managers
        memory_manager = std::make_unique<KMemoryManager>();
        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
                                          application_pool.GetAddress(),
                                          application_pool.GetEndAddress());
        memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(),
                                          applet_pool.GetEndAddress());
        memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(),
                                          system_pool.GetEndAddress());

        // Setup memory regions for emulated processes
        // TODO(bunnei): These should not be hardcoded regions initialized within the kernel
        constexpr std::size_t hid_size{0x40000};
        constexpr std::size_t font_size{0x1100000};
        constexpr std::size_t irs_size{0x8000};
        constexpr std::size_t time_size{0x1000};
        constexpr PAddr hid_addr{layout.System().StartAddress()};
        constexpr PAddr font_pa{layout.System().StartAddress() + hid_size};
        constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size};
        constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};

        // Initialize memory manager
        memory_manager = std::make_unique<KMemoryManager>();
        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
                                          layout.Application().StartAddress(),
                                          layout.Application().EndAddress());
        memory_manager->InitializeManager(KMemoryManager::Pool::Applet,
                                          layout.Applet().StartAddress(),
                                          layout.Applet().EndAddress());
        memory_manager->InitializeManager(KMemoryManager::Pool::System,
                                          layout.System().StartAddress(),
                                          layout.System().EndAddress());
        const PAddr hid_phys_addr{system_pool.GetAddress()};
        const PAddr font_phys_addr{system_pool.GetAddress() + hid_size};
        const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
        const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};

        hid_shared_mem = Kernel::KSharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize},
            KMemoryPermission::None, KMemoryPermission::Read, hid_addr, hid_size,
            system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize},
            KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size,
            "HID:SharedMemory");
        font_shared_mem = Kernel::KSharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr, {font_pa, font_size / PageSize},
            KMemoryPermission::None, KMemoryPermission::Read, font_pa, font_size,
            system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize},
            KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size,
            "Font:SharedMemory");
        irs_shared_mem = Kernel::KSharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr, {irs_addr, irs_size / PageSize},
            KMemoryPermission::None, KMemoryPermission::Read, irs_addr, irs_size,
            system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize},
            KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size,
            "IRS:SharedMemory");
        time_shared_mem = Kernel::KSharedMemory::Create(
            system.Kernel(), system.DeviceMemory(), nullptr, {time_addr, time_size / PageSize},
            KMemoryPermission::None, KMemoryPermission::Read, time_addr, time_size,
            system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize},
            KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size,
            "Time:SharedMemory");
    }

    void InitializeSlabHeaps() {
        // Allocate slab heaps
        user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();

        constexpr u64 user_slab_heap_size{0x1ef000};
        // TODO(ameerj): This should be derived, not hardcoded within the kernel
        constexpr u64 user_slab_heap_size{0x3de000};
        // Reserve slab heaps
        ASSERT(
            system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));

@@ -1,4 +1,4 @@
// Copyright 2014 Citra Emulator Project / PPSSPP Project
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

@@ -10,6 +10,7 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
@@ -26,7 +27,6 @@
#include "core/hle/kernel/svc_results.h"
#include "core/hle/lock.h"
#include "core/memory.h"
#include "core/settings.h"

namespace Kernel {
namespace {
@@ -40,8 +40,9 @@ namespace {
void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
    const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
    ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
    auto thread_res = KThread::Create(system, ThreadType::User, "main", entry_point, priority, 0,
                                      owner_process.GetIdealCoreId(), stack_top, &owner_process);
    auto thread_res =
        KThread::CreateUserThread(system, ThreadType::User, "main", entry_point, priority, 0,
                                  owner_process.GetIdealCoreId(), stack_top, &owner_process);

    std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();

@@ -119,9 +120,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
    std::shared_ptr<Process> process = std::make_shared<Process>(system);
    process->name = std::move(name);

    // TODO: This is inaccurate
    // The process should hold a reference to the kernel-wide resource limit.
    process->resource_limit = std::make_shared<KResourceLimit>(kernel, system);
    process->resource_limit = kernel.GetSystemResourceLimit();
    process->status = ProcessStatus::Created;
    process->program_id = 0;
    process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
@@ -159,17 +158,13 @@ void Process::DecrementThreadCount() {
}

u64 Process::GetTotalPhysicalMemoryAvailable() const {
    // TODO: This is expected to always return the application memory pool size after accurately
    // reserving kernel resources. The current workaround uses a process-local resource limit of
    // application memory pool size, which is inaccurate.
    const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
                       page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
                       main_thread_stack_size};

    ASSERT(capacity == kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application));
    if (capacity < memory_usage_capacity) {
        return capacity;
    }

    return memory_usage_capacity;
}

@@ -271,10 +266,6 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
    system_resource_size = metadata.GetSystemResourceSize();
    image_size = code_size;

    // Set initial resource limits
    resource_limit->SetLimitValue(
        LimitableResource::PhysicalMemory,
        kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application));
    KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                  code_size + system_resource_size);
    if (!memory_reservation.Succeeded()) {
@@ -323,16 +314,6 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
        UNREACHABLE();
    }

    // Set initial resource limits
    resource_limit->SetLimitValue(
        LimitableResource::PhysicalMemory,
        kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application));

    resource_limit->SetLimitValue(LimitableResource::Threads, 608);
    resource_limit->SetLimitValue(LimitableResource::Events, 700);
    resource_limit->SetLimitValue(LimitableResource::TransferMemory, 128);
    resource_limit->SetLimitValue(LimitableResource::Sessions, 894);

    // Create TLS region
    tls_region_address = CreateTLSRegion();
    memory_reservation.Commit();

@@ -281,11 +281,6 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
        continue;
    }

    if (svc_number >= svc_capabilities.size()) {
        LOG_ERROR(Kernel, "Process svc capability is out of range! svc_number={}", svc_number);
        return ResultOutOfRange;
    }

    svc_capabilities[svc_number] = true;
}

@@ -68,7 +68,7 @@ enum class ProgramType {
class ProcessCapabilities {
public:
    using InterruptCapabilities = std::bitset<1024>;
    using SyscallCapabilities = std::bitset<128>;
    using SyscallCapabilities = std::bitset<192>;

    ProcessCapabilities() = default;
    ProcessCapabilities(const ProcessCapabilities&) = delete;

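Widening SyscallCapabilities from 128 to 192 bits matches the SVC tables now running up to 0xBF (= 191), since HandleSyscallFlags indexes the bitset directly by SVC number. A hedged sketch of that bounds-checked indexing (the svc_number value is illustrative):

// Sketch: capability bits are addressed by SVC id, so the bitset width must
// cover the highest id present in the dispatch tables.
#include <bitset>
#include <cstdint>

using SyscallCapabilities = std::bitset<192>;

bool GrantSvc(SyscallCapabilities& caps, std::uint32_t svc_number) {
    if (svc_number >= caps.size()) {
        return false; // out-of-range ids are rejected, mirroring HandleSyscallFlags above
    }
    caps[svc_number] = true;
    return true;
}
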
@@ -1532,8 +1532,9 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
    std::shared_ptr<KThread> thread;
    {
        KScopedLightLock lk{process.GetStateLock()};
        CASCADE_RESULT(thread, KThread::Create(system, ThreadType::User, "", entry_point, priority,
                                               arg, core_id, stack_bottom, &process));
        CASCADE_RESULT(thread,
                       KThread::CreateUserThread(system, ThreadType::User, "", entry_point,
                                                 priority, arg, core_id, stack_bottom, &process));
    }

    const auto new_thread_handle = process.GetHandleTable().Create(thread);
@@ -2155,7 +2156,7 @@ static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle)
    LOG_DEBUG(Kernel_SVC, "called");

    auto& kernel = system.Kernel();
    auto resource_limit = std::make_shared<KResourceLimit>(kernel, system);
    auto resource_limit = std::make_shared<KResourceLimit>(kernel, system.CoreTiming());

    auto* const current_process = kernel.CurrentProcess();
    ASSERT(current_process != nullptr);
@@ -2454,6 +2455,74 @@ static const FunctionDef SVC_Table_32[] = {
    {0x79, nullptr, "Unknown"},
    {0x7A, nullptr, "Unknown"},
    {0x7B, nullptr, "TerminateProcess32"},
    {0x7C, nullptr, "GetProcessInfo32"},
    {0x7D, nullptr, "CreateResourceLimit32"},
    {0x7E, nullptr, "SetResourceLimitLimitValue32"},
    {0x7F, nullptr, "CallSecureMonitor32"},
    {0x80, nullptr, "Unknown"},
    {0x81, nullptr, "Unknown"},
    {0x82, nullptr, "Unknown"},
    {0x83, nullptr, "Unknown"},
    {0x84, nullptr, "Unknown"},
    {0x85, nullptr, "Unknown"},
    {0x86, nullptr, "Unknown"},
    {0x87, nullptr, "Unknown"},
    {0x88, nullptr, "Unknown"},
    {0x89, nullptr, "Unknown"},
    {0x8A, nullptr, "Unknown"},
    {0x8B, nullptr, "Unknown"},
    {0x8C, nullptr, "Unknown"},
    {0x8D, nullptr, "Unknown"},
    {0x8E, nullptr, "Unknown"},
    {0x8F, nullptr, "Unknown"},
    {0x90, nullptr, "Unknown"},
    {0x91, nullptr, "Unknown"},
    {0x92, nullptr, "Unknown"},
    {0x93, nullptr, "Unknown"},
    {0x94, nullptr, "Unknown"},
    {0x95, nullptr, "Unknown"},
    {0x96, nullptr, "Unknown"},
    {0x97, nullptr, "Unknown"},
    {0x98, nullptr, "Unknown"},
    {0x99, nullptr, "Unknown"},
    {0x9A, nullptr, "Unknown"},
    {0x9B, nullptr, "Unknown"},
    {0x9C, nullptr, "Unknown"},
    {0x9D, nullptr, "Unknown"},
    {0x9E, nullptr, "Unknown"},
    {0x9F, nullptr, "Unknown"},
    {0xA0, nullptr, "Unknown"},
    {0xA1, nullptr, "Unknown"},
    {0xA2, nullptr, "Unknown"},
    {0xA3, nullptr, "Unknown"},
    {0xA4, nullptr, "Unknown"},
    {0xA5, nullptr, "Unknown"},
    {0xA6, nullptr, "Unknown"},
    {0xA7, nullptr, "Unknown"},
    {0xA8, nullptr, "Unknown"},
    {0xA9, nullptr, "Unknown"},
    {0xAA, nullptr, "Unknown"},
    {0xAB, nullptr, "Unknown"},
    {0xAC, nullptr, "Unknown"},
    {0xAD, nullptr, "Unknown"},
    {0xAE, nullptr, "Unknown"},
    {0xAF, nullptr, "Unknown"},
    {0xB0, nullptr, "Unknown"},
    {0xB1, nullptr, "Unknown"},
    {0xB2, nullptr, "Unknown"},
    {0xB3, nullptr, "Unknown"},
    {0xB4, nullptr, "Unknown"},
    {0xB5, nullptr, "Unknown"},
    {0xB6, nullptr, "Unknown"},
    {0xB7, nullptr, "Unknown"},
    {0xB8, nullptr, "Unknown"},
    {0xB9, nullptr, "Unknown"},
    {0xBA, nullptr, "Unknown"},
    {0xBB, nullptr, "Unknown"},
    {0xBC, nullptr, "Unknown"},
    {0xBD, nullptr, "Unknown"},
    {0xBE, nullptr, "Unknown"},
    {0xBF, nullptr, "Unknown"},
};

static const FunctionDef SVC_Table_64[] = {
@@ -2585,6 +2654,70 @@ static const FunctionDef SVC_Table_64[] = {
    {0x7D, SvcWrap64<CreateResourceLimit>, "CreateResourceLimit"},
    {0x7E, SvcWrap64<SetResourceLimitLimitValue>, "SetResourceLimitLimitValue"},
    {0x7F, nullptr, "CallSecureMonitor"},
    {0x80, nullptr, "Unknown"},
    {0x81, nullptr, "Unknown"},
    {0x82, nullptr, "Unknown"},
    {0x83, nullptr, "Unknown"},
    {0x84, nullptr, "Unknown"},
    {0x85, nullptr, "Unknown"},
    {0x86, nullptr, "Unknown"},
    {0x87, nullptr, "Unknown"},
    {0x88, nullptr, "Unknown"},
    {0x89, nullptr, "Unknown"},
    {0x8A, nullptr, "Unknown"},
    {0x8B, nullptr, "Unknown"},
    {0x8C, nullptr, "Unknown"},
    {0x8D, nullptr, "Unknown"},
    {0x8E, nullptr, "Unknown"},
    {0x8F, nullptr, "Unknown"},
    {0x90, nullptr, "Unknown"},
    {0x91, nullptr, "Unknown"},
    {0x92, nullptr, "Unknown"},
    {0x93, nullptr, "Unknown"},
    {0x94, nullptr, "Unknown"},
    {0x95, nullptr, "Unknown"},
    {0x96, nullptr, "Unknown"},
    {0x97, nullptr, "Unknown"},
    {0x98, nullptr, "Unknown"},
    {0x99, nullptr, "Unknown"},
    {0x9A, nullptr, "Unknown"},
    {0x9B, nullptr, "Unknown"},
    {0x9C, nullptr, "Unknown"},
    {0x9D, nullptr, "Unknown"},
    {0x9E, nullptr, "Unknown"},
    {0x9F, nullptr, "Unknown"},
    {0xA0, nullptr, "Unknown"},
    {0xA1, nullptr, "Unknown"},
    {0xA2, nullptr, "Unknown"},
    {0xA3, nullptr, "Unknown"},
    {0xA4, nullptr, "Unknown"},
    {0xA5, nullptr, "Unknown"},
    {0xA6, nullptr, "Unknown"},
    {0xA7, nullptr, "Unknown"},
    {0xA8, nullptr, "Unknown"},
    {0xA9, nullptr, "Unknown"},
    {0xAA, nullptr, "Unknown"},
    {0xAB, nullptr, "Unknown"},
    {0xAC, nullptr, "Unknown"},
    {0xAD, nullptr, "Unknown"},
    {0xAE, nullptr, "Unknown"},
    {0xAF, nullptr, "Unknown"},
    {0xB0, nullptr, "Unknown"},
    {0xB1, nullptr, "Unknown"},
    {0xB2, nullptr, "Unknown"},
    {0xB3, nullptr, "Unknown"},
    {0xB4, nullptr, "Unknown"},
    {0xB5, nullptr, "Unknown"},
    {0xB6, nullptr, "Unknown"},
    {0xB7, nullptr, "Unknown"},
    {0xB8, nullptr, "Unknown"},
    {0xB9, nullptr, "Unknown"},
    {0xBA, nullptr, "Unknown"},
    {0xBB, nullptr, "Unknown"},
    {0xBC, nullptr, "Unknown"},
    {0xBD, nullptr, "Unknown"},
    {0xBE, nullptr, "Unknown"},
    {0xBF, nullptr, "Unknown"},
};

static const FunctionDef* GetSVCInfo32(u32 func_num) {

@@ -508,7 +508,7 @@ public:
            {1, &IManagerForApplication::GetAccountId, "GetAccountId"},
            {2, nullptr, "EnsureIdTokenCacheAsync"},
            {3, nullptr, "LoadIdTokenCache"},
            {130, nullptr, "GetNintendoAccountUserResourceCacheForApplication"},
            {130, &IManagerForApplication::GetNintendoAccountUserResourceCacheForApplication, "GetNintendoAccountUserResourceCacheForApplication"},
            {150, nullptr, "CreateAuthorizationRequest"},
            {160, &IManagerForApplication::StoreOpenContext, "StoreOpenContext"},
            {170, nullptr, "LoadNetworkServiceLicenseKindAsync"},
@@ -534,6 +534,22 @@ private:
        rb.PushRaw<u64>(user_id.GetNintendoID());
    }

    void GetNintendoAccountUserResourceCacheForApplication(Kernel::HLERequestContext& ctx) {
        LOG_WARNING(Service_ACC, "(STUBBED) called");

        std::vector<u8> nas_user_base_for_application(0x68);
        ctx.WriteBuffer(nas_user_base_for_application, 0);

        if (ctx.CanWriteBuffer(1)) {
            std::vector<u8> unknown_out_buffer(ctx.GetWriteBufferSize(1));
            ctx.WriteBuffer(unknown_out_buffer, 1);
        }

        IPC::ResponseBuilder rb{ctx, 4};
        rb.Push(RESULT_SUCCESS);
        rb.PushRaw<u64>(user_id.GetNintendoID());
    }

    void StoreOpenContext(Kernel::HLERequestContext& ctx) {
        LOG_WARNING(Service_ACC, "(STUBBED) called");
        IPC::ResponseBuilder rb{ctx, 2};
@@ -594,12 +610,17 @@ public:
    explicit DAUTH_O(Core::System& system_, Common::UUID) : ServiceFramework{system_, "dauth:o"} {
        // clang-format off
        static const FunctionInfo functions[] = {
            {0, nullptr, "EnsureAuthenticationTokenCacheAsync"}, // [5.0.0-5.1.0] GeneratePostData
            {1, nullptr, "LoadAuthenticationTokenCache"}, // 6.0.0+
            {2, nullptr, "InvalidateAuthenticationTokenCache"}, // 6.0.0+
            {10, nullptr, "EnsureEdgeTokenCacheAsync"}, // 6.0.0+
            {11, nullptr, "LoadEdgeTokenCache"}, // 6.0.0+
            {12, nullptr, "InvalidateEdgeTokenCache"}, // 6.0.0+
            {0, nullptr, "EnsureAuthenticationTokenCacheAsync"},
            {1, nullptr, "LoadAuthenticationTokenCache"},
            {2, nullptr, "InvalidateAuthenticationTokenCache"},
            {10, nullptr, "EnsureEdgeTokenCacheAsync"},
            {11, nullptr, "LoadEdgeTokenCache"},
            {12, nullptr, "InvalidateEdgeTokenCache"},
            {20, nullptr, "EnsureApplicationAuthenticationCacheAsync"},
            {21, nullptr, "LoadApplicationAuthenticationTokenCache"},
            {22, nullptr, "LoadApplicationNetworkServiceClientConfigCache"},
            {23, nullptr, "IsApplicationAuthenticationCacheAvailable"},
            {24, nullptr, "InvalidateApplicationAuthenticationCache"},
        };
        // clang-format on

@@ -17,28 +17,30 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
        {3, &ACC_SU::ListOpenUsers, "ListOpenUsers"},
        {4, &ACC_SU::GetLastOpenedUser, "GetLastOpenedUser"},
        {5, &ACC_SU::GetProfile, "GetProfile"},
        {6, nullptr, "GetProfileDigest"}, // 3.0.0+
        {6, nullptr, "GetProfileDigest"},
        {50, &ACC_SU::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"},
        {51, &ACC_SU::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
        {60, &ACC_SU::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 5.0.0 - 5.1.0
        {99, nullptr, "DebugActivateOpenContextRetention"}, // 6.0.0+
        {60, &ACC_SU::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"},
        {99, nullptr, "DebugActivateOpenContextRetention"},
        {100, nullptr, "GetUserRegistrationNotifier"},
        {101, nullptr, "GetUserStateChangeNotifier"},
        {102, nullptr, "GetBaasAccountManagerForSystemService"},
        {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
        {104, nullptr, "GetProfileUpdateNotifier"},
        {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
        {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+
        {105, nullptr, "CheckNetworkServiceAvailabilityAsync"},
        {106, nullptr, "GetProfileSyncNotifier"},
        {110, &ACC_SU::StoreSaveDataThumbnailSystem, "StoreSaveDataThumbnail"},
        {111, nullptr, "ClearSaveDataThumbnail"},
        {112, nullptr, "LoadSaveDataThumbnail"},
        {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+
        {120, nullptr, "ListOpenUsersInApplication"}, // 10.0.0+
        {130, nullptr, "ActivateOpenContextRetention"}, // 6.0.0+
        {140, &ACC_SU::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+
        {150, nullptr, "AuthenticateApplicationAsync"}, // 10.0.0+
        {190, nullptr, "GetUserLastOpenedApplication"}, // 1.0.0 - 9.2.0
        {191, nullptr, "ActivateOpenContextHolder"}, // 7.0.0+
        {113, nullptr, "GetSaveDataThumbnailExistence"},
        {120, nullptr, "ListOpenUsersInApplication"},
        {130, nullptr, "ActivateOpenContextRetention"},
        {140, &ACC_SU::ListQualifiedUsers, "ListQualifiedUsers"},
        {150, nullptr, "AuthenticateApplicationAsync"},
        {151, nullptr, "Unknown151"},
        {152, nullptr, "Unknown152"},
        {190, nullptr, "GetUserLastOpenedApplication"},
        {191, nullptr, "ActivateOpenContextHolder"},
        {200, nullptr, "BeginUserRegistration"},
        {201, nullptr, "CompleteUserRegistration"},
        {202, nullptr, "CancelUserRegistration"},
@@ -46,15 +48,15 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
        {204, nullptr, "SetUserPosition"},
        {205, &ACC_SU::GetProfileEditor, "GetProfileEditor"},
        {206, nullptr, "CompleteUserRegistrationForcibly"},
        {210, nullptr, "CreateFloatingRegistrationRequest"}, // 3.0.0+
        {211, nullptr, "CreateProcedureToRegisterUserWithNintendoAccount"}, // 8.0.0+
        {212, nullptr, "ResumeProcedureToRegisterUserWithNintendoAccount"}, // 8.0.0+
        {210, nullptr, "CreateFloatingRegistrationRequest"},
        {211, nullptr, "CreateProcedureToRegisterUserWithNintendoAccount"},
        {212, nullptr, "ResumeProcedureToRegisterUserWithNintendoAccount"},
        {230, nullptr, "AuthenticateServiceAsync"},
        {250, nullptr, "GetBaasAccountAdministrator"},
        {290, nullptr, "ProxyProcedureForGuestLoginWithNintendoAccount"},
        {291, nullptr, "ProxyProcedureForFloatingRegistrationWithNintendoAccount"}, // 3.0.0+
        {291, nullptr, "ProxyProcedureForFloatingRegistrationWithNintendoAccount"},
        {299, nullptr, "SuspendBackgroundDaemon"},
        {997, nullptr, "DebugInvalidateTokenCacheForUser"}, // 3.0.0+
        {997, nullptr, "DebugInvalidateTokenCacheForUser"},
        {998, nullptr, "DebugSetUserStateClose"},
        {999, nullptr, "DebugSetUserStateOpen"},
    };

@@ -17,29 +17,31 @@ ACC_U1::ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
        {3, &ACC_U1::ListOpenUsers, "ListOpenUsers"},
        {4, &ACC_U1::GetLastOpenedUser, "GetLastOpenedUser"},
        {5, &ACC_U1::GetProfile, "GetProfile"},
        {6, nullptr, "GetProfileDigest"}, // 3.0.0+
        {6, nullptr, "GetProfileDigest"},
        {50, &ACC_U1::IsUserRegistrationRequestPermitted, "IsUserRegistrationRequestPermitted"},
        {51, &ACC_U1::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
        {60, &ACC_U1::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"}, // 5.0.0 - 5.1.0
        {99, nullptr, "DebugActivateOpenContextRetention"}, // 6.0.0+
        {60, &ACC_U1::ListOpenContextStoredUsers, "ListOpenContextStoredUsers"},
        {99, nullptr, "DebugActivateOpenContextRetention"},
        {100, nullptr, "GetUserRegistrationNotifier"},
        {101, nullptr, "GetUserStateChangeNotifier"},
        {102, nullptr, "GetBaasAccountManagerForSystemService"},
        {103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
        {104, nullptr, "GetProfileUpdateNotifier"},
        {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
        {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+
        {105, nullptr, "CheckNetworkServiceAvailabilityAsync"},
        {106, nullptr, "GetProfileSyncNotifier"},
        {110, &ACC_U1::StoreSaveDataThumbnailApplication, "StoreSaveDataThumbnail"},
        {111, nullptr, "ClearSaveDataThumbnail"},
        {112, nullptr, "LoadSaveDataThumbnail"},
        {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+
        {120, nullptr, "ListOpenUsersInApplication"}, // 10.0.0+
        {130, nullptr, "ActivateOpenContextRetention"}, // 6.0.0+
        {140, &ACC_U1::ListQualifiedUsers, "ListQualifiedUsers"}, // 6.0.0+
        {150, nullptr, "AuthenticateApplicationAsync"}, // 10.0.0+
        {190, nullptr, "GetUserLastOpenedApplication"}, // 1.0.0 - 9.2.0
        {191, nullptr, "ActivateOpenContextHolder"}, // 7.0.0+
        {997, nullptr, "DebugInvalidateTokenCacheForUser"}, // 3.0.0+
        {113, nullptr, "GetSaveDataThumbnailExistence"},
        {120, nullptr, "ListOpenUsersInApplication"},
        {130, nullptr, "ActivateOpenContextRetention"},
        {140, &ACC_U1::ListQualifiedUsers, "ListQualifiedUsers"},
        {150, nullptr, "AuthenticateApplicationAsync"},
        {151, nullptr, "Unknown151"},
        {152, nullptr, "Unknown152"},
        {190, nullptr, "GetUserLastOpenedApplication"},
        {191, nullptr, "ActivateOpenContextHolder"},
        {997, nullptr, "DebugInvalidateTokenCacheForUser"},
        {998, nullptr, "DebugSetUserStateClose"},
        {999, nullptr, "DebugSetUserStateOpen"},
    };

@@ -8,8 +8,8 @@
#include <fmt/format.h>

#include "common/file_util.h"
#include "common/settings.h"
#include "core/hle/service/acc/profile_manager.h"
#include "core/settings.h"

namespace Service::Account {
