Compare commits
253 Commits
| SHA1 |
|---|
| d3e87d70ec |
| d69421b1db |
| e44622860a |
| 5ee669466f |
| eeea426c74 |
| ca30190fad |
| b3f68098d5 |
| f234531f92 |
| 9b50b23a50 |
| fb7dcbf7af |
| a63dcb6d56 |
| f8561c7a65 |
| a527e5f0cd |
| 9cebde760f |
| 8a5794c4db |
| cb7f2e5616 |
| 3b98fab850 |
| 7c26a9aefe |
| 2f83d9a61b |
| 99fdfa1fcd |
| 65774084fd |
| b83eb4dd18 |
| 8c016b02e7 |
| 52dae41d7f |
| 716285fab8 |
| dde074eaab |
| 875183e7c5 |
| a50133fc5e |
| e274e38205 |
| a35717b245 |
| 538f097f97 |
| afa4bcbb3b |
| 2e85ee250d |
| cb48ed2e1a |
| 4aa8189328 |
| ec514a4d1b |
| e1f7938a3b |
| ab102787fa |
| 8441094ba3 |
| 0687a8370d |
| df9899eed6 |
| 824e53149d |
| 9761618a8d |
| d3a4a192fe |
| 2f30c10584 |
| c7553abe89 |
| 20eb368e14 |
| f6566338eb |
| 2985e5e94c |
| 3b85ac2ac4 |
| 4735d18bb9 |
| a9d24b0df3 |
| 5dae45b958 |
| 827dcad26e |
| 4439801c0f |
| ad653550eb |
| 59173ff7a7 |
| 87cfe5b1da |
| 2490ffbbce |
| daf5c5060b |
| d1a7b2eca7 |
| 9c4c9f1e7d |
| 69ce5e41eb |
| 9d77ae39de |
| de21c9e330 |
| 8f7eb194af |
| cd7abba1a9 |
| 41e94b7b99 |
| 4bcc5bacff |
| 68ffac250a |
| 1f228c51ca |
| 4cf5b860bd |
| 47af34003b |
| 97415ad07a |
| 7b29a8ce4e |
| a5ab85ac37 |
| 9d010be483 |
| 34a3ee1631 |
| 96c9e67b1b |
| 6faabd6d69 |
| e7038344aa |
| 5213f70230 |
| 0639244d85 |
| b8b5891585 |
| 394475c4e3 |
| 50ee9c46ab |
| 6ab839462c |
| f8bfec3109 |
| 94da1e8a7e |
| 4a45012f35 |
| 5ad62e7bfc |
| 925671071c |
| cd25817938 |
| c7a7e47615 |
| 9b3af0027b |
| ac8b1445ff |
| 52e9d7fa49 |
| 2910aa77b2 |
| 9e9341f4b4 |
| ee9ebeeb80 |
| e895ab7d6f |
| 4738e14cb0 |
| 55f556c53e |
| ab65cb499d |
| 51fb0a6f96 |
| 09f7c355c6 |
| bfa1644464 |
| 272bc4c3d6 |
| 1ba578c4aa |
| d31dbb1bc1 |
| aae399c1a8 |
| 1841ca4b9b |
| 71526ecfc7 |
| ae876ed047 |
| fb0b4c7e27 |
| 20245e660f |
| ec19a85890 |
| 3de8e7a8f2 |
| 3d0394681c |
| 8e4c9c9852 |
| 2807a98168 |
| 1a5d4d7840 |
| def03d4075 |
| 3acb265c9e |
| 728ee181eb |
| 93e20867b0 |
| b1e27890e8 |
| 93109c870e |
| 65e0178cc0 |
| 9e520e8f12 |
| 1d162f28d1 |
| 7ed5dd0d62 |
| 701ef616b2 |
| f7a008d77f |
| 6a19086001 |
| a02566136c |
| e7c33d1ad6 |
| c9235764c7 |
| 6da91da08e |
| 24e1e17a8a |
| b5b92fd1e5 |
| 0d62f30b00 |
| 51e8b2733c |
| 9cae3e6e90 |
| 0c24ae300c |
| 6686468df0 |
| 1c550ff954 |
| c864f2c532 |
| 90f93a408a |
| f3805376f7 |
| 9ca5e52f07 |
| 6be0975bf2 |
| 723e038dba |
| aaccb21f81 |
| df1a9d09a9 |
| 24d0cc3ab8 |
| 86212d4bcd |
| f3345e84ad |
| 592a649918 |
| 0a91599aec |
| cffa6f4e62 |
| ed543c4d5c |
| b53b50adec |
| 48cfc47050 |
| 90610bde9b |
| 9d8f793969 |
| 8378b8a61f |
| fb54c38631 |
| 6269cd7f1d |
| b0a3915351 |
| eae9f2e440 |
| d9a8060ce3 |
| 594973bdd2 |
| 51c13606d6 |
| d25011c92f |
| 0bbf5e61f1 |
| b8ffdbb167 |
| 21b40de318 |
| 70353649d7 |
| 95722823b9 |
| c7325c6a4c |
| b675c44e49 |
| 3c37d66c28 |
| 09722cb4a7 |
| 77564f987c |
| ac265a72ce |
| 83227ad981 |
| dd9caf9aa0 |
| 6171566296 |
| 682d82faf3 |
| 710aa22f7c |
| 6f1ad6aa9f |
| 06e3d3a658 |
| 757fd1e917 |
| d3c7a7e7cf |
| 13becdf18a |
| 5b35b01070 |
| 025fe458ae |
| 3a2eefb16c |
| 0b8b961442 |
| 93a69b6cc8 |
| 7402442442 |
| 75fd3f95a3 |
| 0b631f22fc |
| 3da87d3f12 |
| 2b95c137ff |
| ec9354d6d9 |
| a02b4e1df6 |
| 35df1d1864 |
| 8fd518ec40 |
| 82c2601555 |
| a39d9c5194 |
| 47d5ec6cfc |
| 40ed0cb920 |
| 1a987054c5 |
| 79afdeaf08 |
| 004a8d6a7a |
| 16f97ded21 |
| 9735c34f5d |
| dde19e7d75 |
| 75ccd9959c |
| 19156292a3 |
| 9d8ca6cc4a |
| 069afcc633 |
| 7ad63ea542 |
| d28b942458 |
| 4a7fd91857 |
| c86d770af9 |
| ec9b6641b1 |
| 5fa6b15215 |
| 37939482fb |
| dcc0617cc2 |
| a0379c2db5 |
| e53b6ecc76 |
| f06c3f4907 |
| 886043a6d2 |
| 3e6e0d8f13 |
| dee133ab3d |
| 3c22ce035b |
| a5e184e948 |
| c44ab0f8f6 |
| 0e004269a9 |
| 68f718943e |
| 0cd40fb523 |
| 1dab8acf5f |
| d64ba58759 |
| 864762cac9 |
| 5c7c212f61 |
| bcd4e4f650 |
| a994a40467 |
| 52b79ac009 |
| 8e77d331be |
| e6f9fe1f60 |
18 .ci/scripts/clang/docker.sh (Executable file)
@@ -0,0 +1,18 @@
#!/bin/bash -ex

# Exit on error, rather than continuing with the rest of the script.
set -e

cd /yuzu

ccache -s

mkdir build || true && cd build
cmake .. -DDISPLAY_VERSION=$1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/clang -DCMAKE_CXX_COMPILER=/usr/lib/ccache/clang++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DCMAKE_INSTALL_PREFIX="/usr"

make -j$(nproc)

ccache -s

ctest -VV -C Release
8 .ci/scripts/clang/exec.sh (Normal file)
@@ -0,0 +1,8 @@
#!/bin/bash -ex

mkdir -p "ccache" || true
chmod a+x ./.ci/scripts/clang/docker.sh
# the UID for the container yuzu user is 1027
sudo chown -R 1027 ./
docker run -e ENABLE_COMPATIBILITY_REPORTING -e CCACHE_DIR=/yuzu/ccache -v $(pwd):/yuzu yuzuemu/build-environments:linux-fresh /bin/bash /yuzu/.ci/scripts/clang/docker.sh $1
sudo chown -R $UID ./
20 .ci/scripts/clang/upload.sh (Normal file)
@@ -0,0 +1,20 @@
#!/bin/bash -ex

. .ci/scripts/common/pre-upload.sh

REV_NAME="yuzu-linux-${GITDATE}-${GITREV}"
ARCHIVE_NAME="${REV_NAME}.tar.xz"
COMPRESSION_FLAGS="-cJvf"

if [ "${RELEASE_NAME}" = "mainline" ]; then
    DIR_NAME="${REV_NAME}"
else
    DIR_NAME="${REV_NAME}_${RELEASE_NAME}"
fi

mkdir "$DIR_NAME"

cp build/bin/yuzu-cmd "$DIR_NAME"
cp build/bin/yuzu "$DIR_NAME"

. .ci/scripts/common/post-upload.sh
@@ -12,6 +12,9 @@ jobs:
  windows:
    BuildSuffix: 'windows-mingw'
    ScriptFolder: 'windows'
  clang:
    BuildSuffix: 'clang'
    ScriptFolder: 'clang'
  linux:
    BuildSuffix: 'linux'
    ScriptFolder: 'linux'
@@ -24,4 +27,4 @@ jobs:
    parameters:
      artifactSource: 'false'
      cache: $(parameters.cache)
      version: $(parameters.version)
      version: $(parameters.version)
4 .gitmodules (vendored)
@@ -27,7 +27,7 @@
    url = https://github.com/ReinUsesLisp/sirit
[submodule "mbedtls"]
    path = externals/mbedtls
    url = https://github.com/DarkLordZach/mbedtls
    url = https://github.com/yuzu-emu/mbedtls
[submodule "libzip"]
    path = externals/libzip/libzip
    url = https://github.com/nih-at/libzip.git
@@ -37,6 +37,6 @@
[submodule "opus"]
    path = externals/opus/opus
    url = https://github.com/xiph/opus.git
[submodule "externals/ffmpeg"]
[submodule "ffmpeg"]
    path = externals/ffmpeg
    url = https://git.ffmpeg.org/ffmpeg.git
@@ -504,7 +504,7 @@ if (YUZU_USE_BUNDLED_FFMPEG)
    endif()
else() # WIN32
    # Use yuzu FFmpeg binaries
    set(FFmpeg_EXT_NAME "ffmpeg-4.2.1")
    set(FFmpeg_EXT_NAME "ffmpeg-4.3.1")
    set(FFmpeg_PATH "${CMAKE_BINARY_DIR}/externals/${FFmpeg_EXT_NAME}")
    download_bundled_external("ffmpeg/" ${FFmpeg_EXT_NAME} "")
    set(FFmpeg_FOUND YES)
@@ -1,10 +1,6 @@
function(copy_yuzu_FFmpeg_deps target_dir)
    include(WindowsCopyFiles)
    set(DLL_DEST "${CMAKE_BINARY_DIR}/bin/$<CONFIG>/")
    windows_copy_files(${target_dir} ${FFmpeg_DLL_DIR} ${DLL_DEST}
        avcodec-58.dll
        avutil-56.dll
        swresample-3.dll
        swscale-5.dll
    )
    file(READ "${FFmpeg_PATH}/requirements.txt" FFmpeg_REQUIRED_DLLS)
    windows_copy_files(${target_dir} ${FFmpeg_DLL_DIR} ${DLL_DEST} ${FFmpeg_REQUIRED_DLLS})
endfunction(copy_yuzu_FFmpeg_deps)
@@ -1,7 +1,7 @@
[Icon Theme]
Name=colorful_dark
Comment=Colorful theme (Dark style)
Inherits=default
Inherits=colorful
Directories=16x16

[16x16]
@@ -1,7 +1,7 @@
[Icon Theme]
Name=colorful_midnight_blue
Comment=Colorful theme (Midnight Blue style)
Inherits=default
Inherits=colorful
Directories=16x16

[16x16]
@@ -1257,10 +1257,6 @@ QComboBox::item:alternate {
    background: #19232D;
}

QComboBox::item:checked {
    font-weight: bold;
}

QComboBox::item:selected {
    border: 0px solid transparent;
}
BIN dist/yuzu.bmp (vendored, Normal file)
Binary file not shown.
After: Width: | Height: | Size: 256 KiB
4 externals/CMakeLists.txt (vendored)
@@ -64,8 +64,8 @@ endif()
add_subdirectory(sirit)

# libzip
find_package(Libzip 1.5)
if (NOT LIBZIP_FOUND)
find_package(libzip 1.5)
if (NOT libzip_FOUND)
    message(STATUS "libzip 1.5 or newer not found, falling back to externals")
    add_subdirectory(libzip EXCLUDE_FROM_ALL)
endif()
2 externals/dynarmic (vendored)
Submodule externals/dynarmic updated: 8c09da666a...c28f13af97
72 externals/find-modules/FindLibzip.cmake (vendored)
@@ -1,72 +0,0 @@
find_package(PkgConfig QUIET)
pkg_check_modules(PC_LIBZIP QUIET libzip)

find_path(LIBZIP_INCLUDE_DIR
    NAMES zip.h
    PATHS ${PC_LIBZIP_INCLUDE_DIRS}
    "$ENV{LIB_DIR}/include"
    "$ENV{INCLUDE}"
    /usr/local/include
    /usr/include
)
find_path(LIBZIP_INCLUDE_DIR_ZIPCONF
    NAMES zipconf.h
    HINTS ${PC_LIBZIP_INCLUDE_DIRS}
    "$ENV{LIB_DIR}/include"
    "$ENV{LIB_DIR}/lib/libzip/include"
    "$ENV{LIB}/lib/libzip/include"
    /usr/local/lib/libzip/include
    /usr/lib/libzip/include
    /usr/local/include
    /usr/include
    "$ENV{INCLUDE}"
)
find_library(LIBZIP_LIBRARY
    NAMES zip
    PATHS ${PC_LIBZIP_LIBRARY_DIRS}
    "$ENV{LIB_DIR}/lib" "$ENV{LIB}" /usr/local/lib /usr/lib
)

if (LIBZIP_INCLUDE_DIR_ZIPCONF)
    FILE(READ "${LIBZIP_INCLUDE_DIR_ZIPCONF}/zipconf.h" _LIBZIP_VERSION_CONTENTS)
    if (_LIBZIP_VERSION_CONTENTS)
        STRING(REGEX REPLACE ".*#define LIBZIP_VERSION \"([0-9.]+)\".*" "\\1" LIBZIP_VERSION "${_LIBZIP_VERSION_CONTENTS}")
    endif()
    unset(_LIBZIP_VERSION_CONTENTS)
endif()

set(LIBZIP_VERSION ${LIBZIP_VERSION} CACHE STRING "Version number of libzip")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Libzip
    FOUND_VAR LIBZIP_FOUND
    REQUIRED_VARS
        LIBZIP_LIBRARY
        LIBZIP_INCLUDE_DIR
        LIBZIP_INCLUDE_DIR_ZIPCONF
        LIBZIP_VERSION
    VERSION_VAR LIBZIP_VERSION
)

if(LIBZIP_FOUND)
    set(LIBZIP_LIBRARIES ${LIBZIP_LIBRARY})
    set(LIBZIP_INCLUDE_DIRS ${LIBZIP_INCLUDE_DIR})
    set(LIBZIP_DEFINITIONS ${PC_LIBZIP_CFLAGS_OTHER})
endif()

if(LIBZIP_FOUND AND NOT TARGET libzip::libzip)
    add_library(libzip::libzip UNKNOWN IMPORTED)
    set_target_properties(libzip::libzip PROPERTIES
        IMPORTED_LOCATION "${LIBZIP_LIBRARY}"
        INTERFACE_COMPILE_OPTIONS "${PC_LIBZIP_CFLAGS_OTHER}"
        INTERFACE_INCLUDE_DIRECTORIES "${LIBZIP_INCLUDE_DIR}"
    )
endif()

mark_as_advanced(
    LIBZIP_INCLUDE_DIR
    LIBZIP_INCLUDE_DIR_ZIPCONF
    LIBZIP_LIBRARY
    LIBZIP_VERSION
)
72 externals/find-modules/Findlibzip.cmake (vendored, Normal file)
@@ -0,0 +1,72 @@
find_package(PkgConfig QUIET)
pkg_check_modules(PC_libzip QUIET libzip)

find_path(libzip_INCLUDE_DIR
    NAMES zip.h
    PATHS ${PC_libzip_INCLUDE_DIRS}
    "$ENV{LIB_DIR}/include"
    "$ENV{INCLUDE}"
    /usr/local/include
    /usr/include
)
find_path(libzip_INCLUDE_DIR_ZIPCONF
    NAMES zipconf.h
    HINTS ${PC_libzip_INCLUDE_DIRS}
    "$ENV{LIB_DIR}/include"
    "$ENV{LIB_DIR}/lib/libzip/include"
    "$ENV{LIB}/lib/libzip/include"
    /usr/local/lib/libzip/include
    /usr/lib/libzip/include
    /usr/local/include
    /usr/include
    "$ENV{INCLUDE}"
)
find_library(libzip_LIBRARY
    NAMES zip
    PATHS ${PC_libzip_LIBRARY_DIRS}
    "$ENV{LIB_DIR}/lib" "$ENV{LIB}" /usr/local/lib /usr/lib
)

if (libzip_INCLUDE_DIR_ZIPCONF)
    FILE(READ "${libzip_INCLUDE_DIR_ZIPCONF}/zipconf.h" _libzip_VERSION_CONTENTS)
    if (_libzip_VERSION_CONTENTS)
        STRING(REGEX REPLACE ".*#define LIBZIP_VERSION \"([0-9.]+)\".*" "\\1" libzip_VERSION "${_libzip_VERSION_CONTENTS}")
    endif()
    unset(_libzip_VERSION_CONTENTS)
endif()

set(libzip_VERSION ${libzip_VERSION} CACHE STRING "Version number of libzip")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(libzip
    FOUND_VAR libzip_FOUND
    REQUIRED_VARS
        libzip_LIBRARY
        libzip_INCLUDE_DIR
        libzip_INCLUDE_DIR_ZIPCONF
        libzip_VERSION
    VERSION_VAR libzip_VERSION
)

if(libzip_FOUND)
    set(libzip_LIBRARIES ${libzip_LIBRARY})
    set(libzip_INCLUDE_DIRS ${libzip_INCLUDE_DIR})
    set(libzip_DEFINITIONS ${PC_libzip_CFLAGS_OTHER})
endif()

if(libzip_FOUND AND NOT TARGET libzip::libzip)
    add_library(libzip::libzip UNKNOWN IMPORTED)
    set_target_properties(libzip::libzip PROPERTIES
        IMPORTED_LOCATION "${libzip_LIBRARY}"
        INTERFACE_COMPILE_OPTIONS "${PC_libzip_CFLAGS_OTHER}"
        INTERFACE_INCLUDE_DIRECTORIES "${libzip_INCLUDE_DIR}"
    )
endif()

mark_as_advanced(
    libzip_INCLUDE_DIR
    libzip_INCLUDE_DIR_ZIPCONF
    libzip_LIBRARY
    libzip_VERSION
)
3 externals/glad/include/glad/glad.h (vendored)
@@ -5156,6 +5156,9 @@ GLAPI PFNGLDEPTHRANGEARRAYVPROC glad_glDepthRangeArrayv;
typedef void (APIENTRYP PFNGLDEPTHRANGEINDEXEDPROC)(GLuint index, GLdouble n, GLdouble f);
GLAPI PFNGLDEPTHRANGEINDEXEDPROC glad_glDepthRangeIndexed;
#define glDepthRangeIndexed glad_glDepthRangeIndexed
typedef void (APIENTRYP PFNGLDEPTHRANGEINDEXEDDNVPROC)(GLuint index, GLdouble n, GLdouble f);
GLAPI PFNGLDEPTHRANGEINDEXEDDNVPROC glad_glDepthRangeIndexeddNV;
#define glDepthRangeIndexeddNV glad_glDepthRangeIndexeddNV
typedef void (APIENTRYP PFNGLGETFLOATI_VPROC)(GLenum target, GLuint index, GLfloat *data);
GLAPI PFNGLGETFLOATI_VPROC glad_glGetFloati_v;
#define glGetFloati_v glad_glGetFloati_v
2 externals/glad/src/glad.c (vendored)
@@ -1044,6 +1044,7 @@ PFNGLDEPTHMASKPROC glad_glDepthMask = NULL;
PFNGLDEPTHRANGEPROC glad_glDepthRange = NULL;
PFNGLDEPTHRANGEARRAYVPROC glad_glDepthRangeArrayv = NULL;
PFNGLDEPTHRANGEINDEXEDPROC glad_glDepthRangeIndexed = NULL;
PFNGLDEPTHRANGEINDEXEDDNVPROC glad_glDepthRangeIndexeddNV = NULL;
PFNGLDEPTHRANGEFPROC glad_glDepthRangef = NULL;
PFNGLDETACHSHADERPROC glad_glDetachShader = NULL;
PFNGLDISABLEPROC glad_glDisable = NULL;
@@ -7971,6 +7972,7 @@ static void load_GL_NV_depth_buffer_float(GLADloadproc load) {
    glad_glDepthRangedNV = (PFNGLDEPTHRANGEDNVPROC)load("glDepthRangedNV");
    glad_glClearDepthdNV = (PFNGLCLEARDEPTHDNVPROC)load("glClearDepthdNV");
    glad_glDepthBoundsdNV = (PFNGLDEPTHBOUNDSDNVPROC)load("glDepthBoundsdNV");
    glad_glDepthRangeIndexeddNV = (PFNGLDEPTHRANGEINDEXEDDNVPROC)load("glDepthRangeIndexeddNV");
}
static void load_GL_NV_draw_texture(GLADloadproc load) {
    if(!GLAD_GL_NV_draw_texture) return;
5 externals/libusb/CMakeLists.txt (vendored)
@@ -1,3 +1,8 @@
# Ensure libusb compiles with UTF-8 encoding on MSVC
if(MSVC)
    add_compile_options(/utf-8)
endif()

add_library(usb STATIC EXCLUDE_FROM_ALL
    libusb/libusb/core.c
    libusb/libusb/core.c
2 externals/mbedtls (vendored)
Submodule externals/mbedtls updated: a280e602f3...eac2416b8f
@@ -27,6 +27,7 @@ if (MSVC)
    # /Zo - Enhanced debug info for optimized builds
    # /permissive- - Enables stricter C++ standards conformance checks
    # /EHsc - C++-only exception handling semantics
    # /utf-8 - Set source and execution character sets to UTF-8
    # /volatile:iso - Use strict standards-compliant volatile semantics.
    # /Zc:externConstexpr - Allow extern constexpr variables to have external linkage, like the standard mandates
    # /Zc:inline - Let codegen omit inline functions in object files
@@ -38,6 +39,7 @@ if (MSVC)
        /permissive-
        /EHsc
        /std:c++latest
        /utf-8
        /volatile:iso
        /Zc:externConstexpr
        /Zc:inline
@@ -15,6 +15,8 @@ add_library(audio_core STATIC
    command_generator.cpp
    command_generator.h
    common.h
    delay_line.cpp
    delay_line.h
    effect_context.cpp
    effect_context.h
    info_updater.cpp
@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cmath>
#include <numbers>
#include "audio_core/algorithm/interpolate.h"
#include "audio_core/command_generator.h"
#include "audio_core/effect_context.h"
@@ -13,6 +15,20 @@ namespace AudioCore {
namespace {
constexpr std::size_t MIX_BUFFER_SIZE = 0x3f00;
constexpr std::size_t SCALED_MIX_BUFFER_SIZE = MIX_BUFFER_SIZE << 15ULL;
using DelayLineTimes = std::array<f32, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT>;

constexpr DelayLineTimes FDN_MIN_DELAY_LINE_TIMES{5.0f, 6.0f, 13.0f, 14.0f};
constexpr DelayLineTimes FDN_MAX_DELAY_LINE_TIMES{45.704f, 82.782f, 149.94f, 271.58f};
constexpr DelayLineTimes DECAY0_MAX_DELAY_LINE_TIMES{17.0f, 13.0f, 9.0f, 7.0f};
constexpr DelayLineTimes DECAY1_MAX_DELAY_LINE_TIMES{19.0f, 11.0f, 10.0f, 6.0f};
constexpr std::array<f32, AudioCommon::I3DL2REVERB_TAPS> EARLY_TAP_TIMES{
    0.017136f, 0.059154f, 0.161733f, 0.390186f, 0.425262f, 0.455411f, 0.689737f,
    0.745910f, 0.833844f, 0.859502f, 0.000000f, 0.075024f, 0.168788f, 0.299901f,
    0.337443f, 0.371903f, 0.599011f, 0.716741f, 0.817859f, 0.851664f};
constexpr std::array<f32, AudioCommon::I3DL2REVERB_TAPS> EARLY_GAIN{
    0.67096f, 0.61027f, 1.0f, 0.35680f, 0.68361f, 0.65978f, 0.51939f,
    0.24712f, 0.45945f, 0.45021f, 0.64196f, 0.54879f, 0.92925f, 0.38270f,
    0.72867f, 0.69794f, 0.5464f, 0.24563f, 0.45214f, 0.44042f};

template <std::size_t N>
void ApplyMix(s32* output, const s32* input, s32 gain, s32 sample_count) {
@@ -65,6 +81,154 @@ s32 ApplyMixDepop(s32* output, s32 first_sample, s32 delta, s32 sample_count) {
    }
}

float Pow10(float x) {
    if (x >= 0.0f) {
        return 1.0f;
    } else if (x <= -5.3f) {
        return 0.0f;
    }
    return std::pow(10.0f, x);
}

float SinD(float degrees) {
    return std::sin(degrees * std::numbers::pi_v<float> / 180.0f);
}

float CosD(float degrees) {
    return std::cos(degrees * std::numbers::pi_v<float> / 180.0f);
}

float ToFloat(s32 sample) {
    return static_cast<float>(sample) / 65536.f;
}

s32 ToS32(float sample) {
    constexpr auto min = -8388608.0f;
    constexpr auto max = 8388607.f;
    float rescaled_sample = sample * 65536.0f;
    if (rescaled_sample < min) {
        rescaled_sample = min;
    }
    if (rescaled_sample > max) {
        rescaled_sample = max;
    }
    return static_cast<s32>(rescaled_sample);
}

constexpr std::array<std::size_t, 20> REVERB_TAP_INDEX_1CH{0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

constexpr std::array<std::size_t, 20> REVERB_TAP_INDEX_2CH{0, 0, 0, 1, 1, 1, 1, 0, 0, 0,
                                                           1, 1, 1, 0, 0, 0, 0, 1, 1, 1};

constexpr std::array<std::size_t, 20> REVERB_TAP_INDEX_4CH{0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
                                                           1, 1, 1, 0, 0, 0, 0, 3, 3, 3};

constexpr std::array<std::size_t, 20> REVERB_TAP_INDEX_6CH{4, 0, 0, 1, 1, 1, 1, 2, 2, 2,
                                                           1, 1, 1, 0, 0, 0, 0, 3, 3, 3};

template <std::size_t CHANNEL_COUNT>
void ApplyReverbGeneric(I3dl2ReverbState& state,
                        const std::array<const s32*, AudioCommon::MAX_CHANNEL_COUNT>& input,
                        const std::array<s32*, AudioCommon::MAX_CHANNEL_COUNT>& output,
                        s32 sample_count) {

    auto GetTapLookup = []() {
        if constexpr (CHANNEL_COUNT == 1) {
            return REVERB_TAP_INDEX_1CH;
        } else if constexpr (CHANNEL_COUNT == 2) {
            return REVERB_TAP_INDEX_2CH;
        } else if constexpr (CHANNEL_COUNT == 4) {
            return REVERB_TAP_INDEX_4CH;
        } else if constexpr (CHANNEL_COUNT == 6) {
            return REVERB_TAP_INDEX_6CH;
        }
    };

    const auto& tap_index_lut = GetTapLookup();
    for (s32 sample = 0; sample < sample_count; sample++) {
        std::array<f32, CHANNEL_COUNT> out_samples{};
        std::array<f32, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> fsamp{};
        std::array<f32, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> mixed{};
        std::array<f32, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> osamp{};

        // Mix everything into a single sample
        s32 temp_mixed_sample = 0;
        for (std::size_t i = 0; i < CHANNEL_COUNT; i++) {
            temp_mixed_sample += input[i][sample];
        }
        const auto current_sample = ToFloat(temp_mixed_sample);
        const auto early_tap = state.early_delay_line.TapOut(state.early_to_late_taps);

        for (std::size_t i = 0; i < AudioCommon::I3DL2REVERB_TAPS; i++) {
            const auto tapped_samp =
                state.early_delay_line.TapOut(state.early_tap_steps[i]) * EARLY_GAIN[i];
            out_samples[tap_index_lut[i]] += tapped_samp;

            if constexpr (CHANNEL_COUNT == 6) {
                // handle lfe
                out_samples[5] += tapped_samp;
            }
        }

        state.lowpass_0 = current_sample * state.lowpass_2 + state.lowpass_0 * state.lowpass_1;
        state.early_delay_line.Tick(state.lowpass_0);

        for (std::size_t i = 0; i < CHANNEL_COUNT; i++) {
            out_samples[i] *= state.early_gain;
        }

        // Two channel seems to apply a later gain, which we need to save
        f32 filter{};
        for (std::size_t i = 0; i < AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT; i++) {
            filter = state.fdn_delay_line[i].GetOutputSample();
            const auto computed = filter * state.lpf_coefficients[0][i] + state.shelf_filter[i];
            state.shelf_filter[i] =
                filter * state.lpf_coefficients[1][i] + computed * state.lpf_coefficients[2][i];
            fsamp[i] = computed;
        }

        // Mixing matrix
        mixed[0] = fsamp[1] + fsamp[2];
        mixed[1] = -fsamp[0] - fsamp[3];
        mixed[2] = fsamp[0] - fsamp[3];
        mixed[3] = fsamp[1] - fsamp[2];

        if constexpr (CHANNEL_COUNT == 2) {
            for (auto& mix : mixed) {
                mix *= (filter * state.late_gain);
            }
        }

        for (std::size_t i = 0; i < AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT; i++) {
            const auto late = early_tap * state.late_gain;
            osamp[i] = state.decay_delay_line0[i].Tick(late + mixed[i]);
            osamp[i] = state.decay_delay_line1[i].Tick(osamp[i]);
            state.fdn_delay_line[i].Tick(osamp[i]);
        }

        if constexpr (CHANNEL_COUNT == 1) {
            output[0][sample] = ToS32(state.dry_gain * ToFloat(input[0][sample]) +
                                      (out_samples[0] + osamp[0] + osamp[1]));
        } else if constexpr (CHANNEL_COUNT == 2 || CHANNEL_COUNT == 4) {
            for (std::size_t i = 0; i < CHANNEL_COUNT; i++) {
                output[i][sample] =
                    ToS32(state.dry_gain * ToFloat(input[i][sample]) + (out_samples[i] + osamp[i]));
            }
        } else if constexpr (CHANNEL_COUNT == 6) {
            const auto temp_center = state.center_delay_line.Tick(0.5f * (osamp[2] - osamp[3]));
            for (std::size_t i = 0; i < 4; i++) {
                output[i][sample] =
                    ToS32(state.dry_gain * ToFloat(input[i][sample]) + (out_samples[i] + osamp[i]));
            }
            output[4][sample] =
                ToS32(state.dry_gain * ToFloat(input[4][sample]) + (out_samples[4] + temp_center));
            output[5][sample] =
                ToS32(state.dry_gain * ToFloat(input[5][sample]) + (out_samples[5] + osamp[3]));
        }
    }
}

} // namespace

CommandGenerator::CommandGenerator(AudioCommon::AudioRendererParameter& worker_params_,
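The ToFloat/ToS32 helpers above treat mixer samples as signed fixed-point values with 16 fractional bits and clamp to the signed 24-bit range on the way back to integers. Below is a minimal standalone sketch of that conversion; the function names and the checks in main() are illustrative and not part of the PR.

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// Float -> fixed, clamped to the signed 24-bit PCM range (8388607 == 2^23 - 1),
// mirroring ToS32 in the hunk above.
int32_t FloatToFixedSample(float sample) {
    const float rescaled = std::clamp(sample * 65536.0f, -8388608.0f, 8388607.0f);
    return static_cast<int32_t>(rescaled);
}

// Fixed -> float, mirroring ToFloat above (divide out the 16 fractional bits).
float FixedSampleToFloat(int32_t sample) {
    return static_cast<float>(sample) / 65536.0f;
}

int main() {
    // A full-scale 16-bit sample survives the round trip unchanged...
    assert(FloatToFixedSample(FixedSampleToFloat(32767)) == 32767);
    // ...while an out-of-range float is clamped rather than wrapped.
    assert(FloatToFixedSample(1.0e9f) == 8388607);
    return 0;
}
```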
@@ -271,11 +435,10 @@ void CommandGenerator::GenerateBiquadFilterCommandForVoice(ServerVoiceInfo& voic
    }

    // Generate biquad filter
    // GenerateBiquadFilterCommand(mix_buffer_count, biquad_filter,
    //                             dsp_state.biquad_filter_state,
    //                             mix_buffer_count + channel, mix_buffer_count +
    //                             channel, worker_params.sample_count,
    //                             voice_info.GetInParams().node_id);
    // GenerateBiquadFilterCommand(mix_buffer_count, biquad_filter,
    //                             dsp_state.biquad_filter_state,
    //                             mix_buffer_count + channel, mix_buffer_count + channel,
    //                             worker_params.sample_count, voice_info.GetInParams().node_id);
    }
}
@@ -376,21 +539,54 @@ void CommandGenerator::GenerateEffectCommand(ServerMixInfo& mix_info) {

void CommandGenerator::GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, EffectBase* info,
                                                        bool enabled) {
    if (!enabled) {
    auto* reverb = dynamic_cast<EffectI3dl2Reverb*>(info);
    const auto& params = reverb->GetParams();
    auto& state = reverb->GetState();
    const auto channel_count = params.channel_count;

    if (channel_count != 1 && channel_count != 2 && channel_count != 4 && channel_count != 6) {
        return;
    }
    const auto& params = dynamic_cast<EffectI3dl2Reverb*>(info)->GetParams();
    const auto channel_count = params.channel_count;

    std::array<const s32*, AudioCommon::MAX_CHANNEL_COUNT> input{};
    std::array<s32*, AudioCommon::MAX_CHANNEL_COUNT> output{};

    const auto status = params.status;
    for (s32 i = 0; i < channel_count; i++) {
        // TODO(ogniK): Actually implement reverb
        /*
        if (params.input[i] != params.output[i]) {
            const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
            auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
            ApplyMix<1>(output, input, 32768, worker_params.sample_count);
        }*/
        auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
        std::memset(output, 0, worker_params.sample_count * sizeof(s32));
        input[i] = GetMixBuffer(mix_buffer_offset + params.input[i]);
        output[i] = GetMixBuffer(mix_buffer_offset + params.output[i]);
    }

    if (enabled) {
        if (status == ParameterStatus::Initialized) {
            InitializeI3dl2Reverb(reverb->GetParams(), state, info->GetWorkBuffer());
        } else if (status == ParameterStatus::Updating) {
            UpdateI3dl2Reverb(reverb->GetParams(), state, false);
        }
    }

    if (enabled) {
        switch (channel_count) {
        case 1:
            ApplyReverbGeneric<1>(state, input, output, worker_params.sample_count);
            break;
        case 2:
            ApplyReverbGeneric<2>(state, input, output, worker_params.sample_count);
            break;
        case 4:
            ApplyReverbGeneric<4>(state, input, output, worker_params.sample_count);
            break;
        case 6:
            ApplyReverbGeneric<6>(state, input, output, worker_params.sample_count);
            break;
        }
    } else {
        for (s32 i = 0; i < channel_count; i++) {
            // Only copy if the buffer input and output do not match!
            if ((mix_buffer_offset + params.input[i]) != (mix_buffer_offset + params.output[i])) {
                std::memcpy(output[i], input[i], worker_params.sample_count * sizeof(s32));
            }
        }
    }
}
@@ -528,6 +724,133 @@ s32 CommandGenerator::ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u3
    return sample_count;
}

void CommandGenerator::InitializeI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbState& state,
                                             std::vector<u8>& work_buffer) {
    // Reset state
    state.lowpass_0 = 0.0f;
    state.lowpass_1 = 0.0f;
    state.lowpass_2 = 0.0f;

    state.early_delay_line.Reset();
    state.early_tap_steps.fill(0);
    state.early_gain = 0.0f;
    state.late_gain = 0.0f;
    state.early_to_late_taps = 0;
    for (std::size_t i = 0; i < AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT; i++) {
        state.fdn_delay_line[i].Reset();
        state.decay_delay_line0[i].Reset();
        state.decay_delay_line1[i].Reset();
    }
    state.last_reverb_echo = 0.0f;
    state.center_delay_line.Reset();
    for (auto& coef : state.lpf_coefficients) {
        coef.fill(0.0f);
    }
    state.shelf_filter.fill(0.0f);
    state.dry_gain = 0.0f;

    const auto sample_rate = info.sample_rate / 1000;
    f32* work_buffer_ptr = reinterpret_cast<f32*>(work_buffer.data());

    s32 delay_samples{};
    for (std::size_t i = 0; i < AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT; i++) {
        delay_samples =
            AudioCommon::CalculateDelaySamples(sample_rate, FDN_MAX_DELAY_LINE_TIMES[i]);
        state.fdn_delay_line[i].Initialize(delay_samples, work_buffer_ptr);
        work_buffer_ptr += delay_samples + 1;

        delay_samples =
            AudioCommon::CalculateDelaySamples(sample_rate, DECAY0_MAX_DELAY_LINE_TIMES[i]);
        state.decay_delay_line0[i].Initialize(delay_samples, 0.0f, work_buffer_ptr);
        work_buffer_ptr += delay_samples + 1;

        delay_samples =
            AudioCommon::CalculateDelaySamples(sample_rate, DECAY1_MAX_DELAY_LINE_TIMES[i]);
        state.decay_delay_line1[i].Initialize(delay_samples, 0.0f, work_buffer_ptr);
        work_buffer_ptr += delay_samples + 1;
    }
    delay_samples = AudioCommon::CalculateDelaySamples(sample_rate, 5.0f);
    state.center_delay_line.Initialize(delay_samples, work_buffer_ptr);
    work_buffer_ptr += delay_samples + 1;

    delay_samples = AudioCommon::CalculateDelaySamples(sample_rate, 400.0f);
    state.early_delay_line.Initialize(delay_samples, work_buffer_ptr);

    UpdateI3dl2Reverb(info, state, true);
}

void CommandGenerator::UpdateI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbState& state,
                                         bool should_clear) {

    state.dry_gain = info.dry_gain;
    state.shelf_filter.fill(0.0f);
    state.lowpass_0 = 0.0f;
    state.early_gain = Pow10(std::min(info.room + info.reflection, 5000.0f) / 2000.0f);
    state.late_gain = Pow10(std::min(info.room + info.reverb, 5000.0f) / 2000.0f);

    const auto sample_rate = info.sample_rate / 1000;
    const f32 hf_gain = Pow10(info.room_hf / 2000.0f);
    if (hf_gain >= 1.0f) {
        state.lowpass_2 = 1.0f;
        state.lowpass_1 = 0.0f;
    } else {
        const auto a = 1.0f - hf_gain;
        const auto b = 2.0f * (1.0f - hf_gain * CosD(256.0f * info.hf_reference /
                                                     static_cast<f32>(info.sample_rate)));
        const auto c = std::sqrt(b * b - 4.0f * a * a);

        state.lowpass_1 = (b - c) / (2.0f * a);
        state.lowpass_2 = 1.0f - state.lowpass_1;
    }
    state.early_to_late_taps = AudioCommon::CalculateDelaySamples(
        sample_rate, 1000.0f * (info.reflection_delay + info.reverb_delay));

    state.last_reverb_echo = 0.6f * info.diffusion * 0.01f;
    for (std::size_t i = 0; i < AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT; i++) {
        const auto length =
            FDN_MIN_DELAY_LINE_TIMES[i] +
            (info.density / 100.0f) * (FDN_MAX_DELAY_LINE_TIMES[i] - FDN_MIN_DELAY_LINE_TIMES[i]);
        state.fdn_delay_line[i].SetDelay(AudioCommon::CalculateDelaySamples(sample_rate, length));

        const auto delay_sample_counts = state.fdn_delay_line[i].GetDelay() +
                                         state.decay_delay_line0[i].GetDelay() +
                                         state.decay_delay_line1[i].GetDelay();

        float a = (-60.0f * static_cast<f32>(delay_sample_counts)) /
                  (info.decay_time * static_cast<f32>(info.sample_rate));
        float b = a / info.hf_decay_ratio;
        float c = CosD(128.0f * 0.5f * info.hf_reference / static_cast<f32>(info.sample_rate)) /
                  SinD(128.0f * 0.5f * info.hf_reference / static_cast<f32>(info.sample_rate));
        float d = Pow10((b - a) / 40.0f);
        float e = Pow10((b + a) / 40.0f) * 0.7071f;

        state.lpf_coefficients[0][i] = e * ((d * c) + 1.0f) / (c + d);
        state.lpf_coefficients[1][i] = e * (1.0f - (d * c)) / (c + d);
        state.lpf_coefficients[2][i] = (c - d) / (c + d);

        state.decay_delay_line0[i].SetCoefficient(state.last_reverb_echo);
        state.decay_delay_line1[i].SetCoefficient(-0.9f * state.last_reverb_echo);
    }

    if (should_clear) {
        for (std::size_t i = 0; i < AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT; i++) {
            state.fdn_delay_line[i].Clear();
            state.decay_delay_line0[i].Clear();
            state.decay_delay_line1[i].Clear();
        }
        state.early_delay_line.Clear();
        state.center_delay_line.Clear();
    }

    const auto max_early_delay = state.early_delay_line.GetMaxDelay();
    const auto reflection_time = 1000.0f * (0.0098f * info.reverb_delay + 0.02f);
    for (std::size_t tap = 0; tap < AudioCommon::I3DL2REVERB_TAPS; tap++) {
        const auto length = AudioCommon::CalculateDelaySamples(
            sample_rate, 1000.0f * info.reflection_delay + reflection_time * EARLY_TAP_TIMES[tap]);
        state.early_tap_steps[tap] = std::min(length, max_early_delay);
    }
}

void CommandGenerator::GenerateVolumeRampCommand(float last_volume, float current_volume,
                                                 s32 channel, s32 node_id) {
    const auto last = static_cast<s32>(last_volume * 32768.0f);
@@ -21,6 +21,8 @@ class ServerMixInfo;
class EffectContext;
class EffectBase;
struct AuxInfoDSP;
struct I3dl2ReverbParams;
struct I3dl2ReverbState;
using MixVolumeBuffer = std::array<float, AudioCommon::MAX_MIX_BUFFERS>;

class CommandGenerator {
@@ -80,6 +82,9 @@ private:
    s32 ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u32 max_samples, s32* out_data,
                      u32 sample_count, u32 read_offset, u32 read_count);

    void InitializeI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbState& state,
                               std::vector<u8>& work_buffer);
    void UpdateI3dl2Reverb(I3dl2ReverbParams& info, I3dl2ReverbState& state, bool should_clear);
    // DSP Code
    s32 DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_count,
                    s32 channel, std::size_t mix_offset);
@@ -33,6 +33,29 @@ constexpr std::size_t TEMP_MIX_BASE_SIZE = 0x3f00; // TODO(ogniK): Work out this
                                                   // and our const ends up being 0x3f04, the 4 bytes are most
                                                   // likely the sample history
constexpr std::size_t TOTAL_TEMP_MIX_SIZE = TEMP_MIX_BASE_SIZE + AudioCommon::MAX_SAMPLE_HISTORY;
constexpr f32 I3DL2REVERB_MAX_LEVEL = 5000.0f;
constexpr f32 I3DL2REVERB_MIN_REFLECTION_DURATION = 0.02f;
constexpr std::size_t I3DL2REVERB_TAPS = 20;
constexpr std::size_t I3DL2REVERB_DELAY_LINE_COUNT = 4;
using Fractional = s32;

template <typename T>
constexpr Fractional ToFractional(T x) {
    return static_cast<Fractional>(x * static_cast<T>(0x4000));
}

constexpr Fractional MultiplyFractional(Fractional lhs, Fractional rhs) {
    return static_cast<Fractional>(static_cast<s64>(lhs) * rhs >> 14);
}

constexpr s32 FractionalToFixed(Fractional x) {
    const auto s = x & (1 << 13);
    return static_cast<s32>(x >> 14) + s;
}

constexpr s32 CalculateDelaySamples(s32 sample_rate_khz, float time) {
    return FractionalToFixed(MultiplyFractional(ToFractional(sample_rate_khz), ToFractional(time)));
}

static constexpr u32 VersionFromRevision(u32_le rev) {
    // "REV7" -> 7
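The new Fractional helpers above compute delay lengths with Q.14 fixed-point arithmetic: both operands are scaled by 2^14, the product is rescaled once, and the result is converted back to an integer sample count. A standalone sketch follows; the helpers are copied from the hunk, and the static_assert is mine, added only to illustrate the intended result of 5 ms at 48 kHz.

```cpp
#include <cstdint>

using Fractional = int32_t;

template <typename T>
constexpr Fractional ToFractional(T x) {
    return static_cast<Fractional>(x * static_cast<T>(0x4000)); // scale by 2^14
}

constexpr Fractional MultiplyFractional(Fractional lhs, Fractional rhs) {
    return static_cast<Fractional>(static_cast<int64_t>(lhs) * rhs >> 14);
}

constexpr int32_t FractionalToFixed(Fractional x) {
    const auto s = x & (1 << 13); // taken verbatim from the helper above
    return static_cast<int32_t>(x >> 14) + s;
}

constexpr int32_t CalculateDelaySamples(int32_t sample_rate_khz, float time_ms) {
    return FractionalToFixed(
        MultiplyFractional(ToFractional(sample_rate_khz), ToFractional(time_ms)));
}

// 5 ms at 48 kHz is exactly 240 samples; the fixed-point path reproduces that.
static_assert(CalculateDelaySamples(48, 5.0f) == 240);
```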
104 src/audio_core/delay_line.cpp (Normal file)
@@ -0,0 +1,104 @@
#include <cstring>
#include "audio_core/delay_line.h"

namespace AudioCore {
DelayLineBase::DelayLineBase() = default;
DelayLineBase::~DelayLineBase() = default;

void DelayLineBase::Initialize(s32 max_delay_, float* src_buffer) {
    buffer = src_buffer;
    buffer_end = buffer + max_delay_;
    max_delay = max_delay_;
    output = buffer;
    SetDelay(max_delay_);
    Clear();
}

void DelayLineBase::SetDelay(s32 new_delay) {
    if (max_delay < new_delay) {
        return;
    }
    delay = new_delay;
    input = (buffer + ((output - buffer) + new_delay) % (max_delay + 1));
}

s32 DelayLineBase::GetDelay() const {
    return delay;
}

s32 DelayLineBase::GetMaxDelay() const {
    return max_delay;
}

f32 DelayLineBase::TapOut(s32 last_sample) {
    const float* ptr = input - (last_sample + 1);
    if (ptr < buffer) {
        ptr += (max_delay + 1);
    }

    return *ptr;
}

f32 DelayLineBase::Tick(f32 sample) {
    *(input++) = sample;
    const auto out_sample = *(output++);

    if (buffer_end < input) {
        input = buffer;
    }

    if (buffer_end < output) {
        output = buffer;
    }

    return out_sample;
}

float* DelayLineBase::GetInput() {
    return input;
}

const float* DelayLineBase::GetInput() const {
    return input;
}

f32 DelayLineBase::GetOutputSample() const {
    return *output;
}

void DelayLineBase::Clear() {
    std::memset(buffer, 0, sizeof(float) * max_delay);
}

void DelayLineBase::Reset() {
    buffer = nullptr;
    buffer_end = nullptr;
    max_delay = 0;
    input = nullptr;
    output = nullptr;
    delay = 0;
}

DelayLineAllPass::DelayLineAllPass() = default;
DelayLineAllPass::~DelayLineAllPass() = default;

void DelayLineAllPass::Initialize(u32 delay_, float coeffcient_, f32* src_buffer) {
    DelayLineBase::Initialize(delay_, src_buffer);
    SetCoefficient(coeffcient_);
}

void DelayLineAllPass::SetCoefficient(float coeffcient_) {
    coefficient = coeffcient_;
}

f32 DelayLineAllPass::Tick(f32 sample) {
    const auto temp = sample - coefficient * *output;
    return coefficient * temp + DelayLineBase::Tick(temp);
}

void DelayLineAllPass::Reset() {
    coefficient = 0.0f;
    DelayLineBase::Reset();
}

} // namespace AudioCore
46 src/audio_core/delay_line.h (Normal file)
@@ -0,0 +1,46 @@
#pragma once

#include "common/common_types.h"

namespace AudioCore {

class DelayLineBase {
public:
    DelayLineBase();
    ~DelayLineBase();

    void Initialize(s32 max_delay_, float* src_buffer);
    void SetDelay(s32 new_delay);
    s32 GetDelay() const;
    s32 GetMaxDelay() const;
    f32 TapOut(s32 last_sample);
    f32 Tick(f32 sample);
    float* GetInput();
    const float* GetInput() const;
    f32 GetOutputSample() const;
    void Clear();
    void Reset();

protected:
    float* buffer{nullptr};
    float* buffer_end{nullptr};
    s32 max_delay{};
    float* input{nullptr};
    float* output{nullptr};
    s32 delay{};
};

class DelayLineAllPass final : public DelayLineBase {
public:
    DelayLineAllPass();
    ~DelayLineAllPass();

    void Initialize(u32 delay, float coeffcient_, f32* src_buffer);
    void SetCoefficient(float coeffcient_);
    f32 Tick(f32 sample);
    void Reset();

private:
    float coefficient{};
};
} // namespace AudioCore
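A short usage sketch of the new delay-line class (assumes the yuzu source tree; the function and variable names are illustrative, not part of this PR). The caller owns the backing storage, mirroring how InitializeI3dl2Reverb carves each line out of the effect work buffer with max_delay + 1 floats per line.

```cpp
#include <vector>
#include "audio_core/delay_line.h"

void DelayLineExample() {
    constexpr s32 max_delay = 240;             // e.g. 5 ms at 48 kHz
    std::vector<float> storage(max_delay + 1); // backing memory owned by the caller

    AudioCore::DelayLineBase line;
    line.Initialize(max_delay, storage.data());
    line.SetDelay(120);                        // shorten the line without reallocating

    float out = 0.0f;
    for (int i = 0; i < 480; ++i) {
        out = line.Tick(static_cast<float>(i)); // push one sample, pop the delayed one
    }
    // TapOut(n) reads n + 1 samples behind the write position without advancing it.
    const float tapped = line.TapOut(10);
    (void)out;
    (void)tapped;
}
```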
@@ -90,6 +90,14 @@ s32 EffectBase::GetProcessingOrder() const {
    return processing_order;
}

std::vector<u8>& EffectBase::GetWorkBuffer() {
    return work_buffer;
}

const std::vector<u8>& EffectBase::GetWorkBuffer() const {
    return work_buffer;
}

EffectI3dl2Reverb::EffectI3dl2Reverb() : EffectGeneric(EffectType::I3dl2Reverb) {}
EffectI3dl2Reverb::~EffectI3dl2Reverb() = default;
@@ -117,6 +125,12 @@ void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
        usage = UsageState::Initialized;
        params.status = ParameterStatus::Initialized;
        skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
        if (!skipped) {
            auto& cur_work_buffer = GetWorkBuffer();
            // Has two buffers internally
            cur_work_buffer.resize(in_params.buffer_size * 2);
            std::fill(cur_work_buffer.begin(), cur_work_buffer.end(), 0);
        }
    }
}
@@ -129,6 +143,14 @@ void EffectI3dl2Reverb::UpdateForCommandGeneration() {
    GetParams().status = ParameterStatus::Updated;
}

I3dl2ReverbState& EffectI3dl2Reverb::GetState() {
    return state;
}

const I3dl2ReverbState& EffectI3dl2Reverb::GetState() const {
    return state;
}

EffectBiquadFilter::EffectBiquadFilter() : EffectGeneric(EffectType::BiquadFilter) {}
EffectBiquadFilter::~EffectBiquadFilter() = default;
@@ -8,6 +8,7 @@
#include <memory>
#include <vector>
#include "audio_core/common.h"
#include "audio_core/delay_line.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
@@ -194,6 +195,8 @@ public:
    [[nodiscard]] bool IsEnabled() const;
    [[nodiscard]] s32 GetMixID() const;
    [[nodiscard]] s32 GetProcessingOrder() const;
    [[nodiscard]] std::vector<u8>& GetWorkBuffer();
    [[nodiscard]] const std::vector<u8>& GetWorkBuffer() const;

protected:
    UsageState usage{UsageState::Invalid};
@@ -201,6 +204,7 @@ protected:
    s32 mix_id{};
    s32 processing_order{};
    bool enabled = false;
    std::vector<u8> work_buffer{};
};

template <typename T>
@@ -212,7 +216,7 @@ public:
        return internal_params;
    }

    const I3dl2ReverbParams& GetParams() const {
    const T& GetParams() const {
        return internal_params;
    }
@@ -229,6 +233,27 @@ public:
    void UpdateForCommandGeneration() override;
};

struct I3dl2ReverbState {
    f32 lowpass_0{};
    f32 lowpass_1{};
    f32 lowpass_2{};

    DelayLineBase early_delay_line{};
    std::array<u32, AudioCommon::I3DL2REVERB_TAPS> early_tap_steps{};
    f32 early_gain{};
    f32 late_gain{};

    u32 early_to_late_taps{};
    std::array<DelayLineBase, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> fdn_delay_line{};
    std::array<DelayLineAllPass, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> decay_delay_line0{};
    std::array<DelayLineAllPass, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> decay_delay_line1{};
    f32 last_reverb_echo{};
    DelayLineBase center_delay_line{};
    std::array<std::array<f32, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT>, 3> lpf_coefficients{};
    std::array<f32, AudioCommon::I3DL2REVERB_DELAY_LINE_COUNT> shelf_filter{};
    f32 dry_gain{};
};

class EffectI3dl2Reverb : public EffectGeneric<I3dl2ReverbParams> {
public:
    explicit EffectI3dl2Reverb();
@@ -237,8 +262,12 @@ public:
    void Update(EffectInfo::InParams& in_params) override;
    void UpdateForCommandGeneration() override;

    I3dl2ReverbState& GetState();
    const I3dl2ReverbState& GetState() const;

private:
    bool skipped = false;
    I3dl2ReverbState state{};
};

class EffectBiquadFilter : public EffectGeneric<BiquadFilterParams> {
@@ -167,8 +167,8 @@ add_library(common STATIC
    threadsafe_queue.h
    time_zone.cpp
    time_zone.h
    tiny_mt.h
    tree.h
    uint128.cpp
    uint128.h
    uuid.cpp
    uuid.h
@@ -42,6 +42,11 @@ requires std::is_integral_v<T>[[nodiscard]] constexpr bool IsAligned(T value, si
    return (value & mask) == 0;
}

template <typename T, typename U>
requires std::is_integral_v<T>[[nodiscard]] constexpr T DivideUp(T x, U y) {
    return (x + (y - 1)) / y;
}

template <typename T, size_t Align = 16>
class AlignmentAllocator {
public:
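The new DivideUp helper is ceiling division on integers, handy for questions like how many fixed-size pages cover a byte count. A tiny standalone illustration follows; DivideUpExample is a hypothetical stand-in using the same expression, not the header's template itself.

```cpp
#include <cstddef>

template <typename T, typename U>
constexpr T DivideUpExample(T x, U y) {
    return (x + (y - 1)) / y; // same expression as DivideUp above
}

// A 4096-byte buffer needs one 4 KiB page, 4097 bytes need two, zero bytes need none.
static_assert(DivideUpExample(std::size_t{4096}, 4096) == 1);
static_assert(DivideUpExample(std::size_t{4097}, 4096) == 2);
static_assert(DivideUpExample(std::size_t{0}, 4096) == 0);
```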
@@ -28,8 +28,10 @@
// compromising on hash quality.

#include <algorithm>
#include <string.h> // for memcpy and memset
#include "cityhash.h"
#include <cstring>
#include <utility>

#include "common/cityhash.h"
#include "common/swap.h"

// #include "config.h"
@@ -42,21 +44,17 @@

using namespace std;

typedef uint8_t uint8;
typedef uint32_t uint32;
typedef uint64_t uint64;

namespace Common {

static uint64 UNALIGNED_LOAD64(const char* p) {
    uint64 result;
    memcpy(&result, p, sizeof(result));
static u64 unaligned_load64(const char* p) {
    u64 result;
    std::memcpy(&result, p, sizeof(result));
    return result;
}

static uint32 UNALIGNED_LOAD32(const char* p) {
    uint32 result;
    memcpy(&result, p, sizeof(result));
static u32 unaligned_load32(const char* p) {
    u32 result;
    std::memcpy(&result, p, sizeof(result));
    return result;
}
@@ -76,64 +74,64 @@ static uint32 UNALIGNED_LOAD32(const char* p) {
#endif
#endif

static uint64 Fetch64(const char* p) {
    return uint64_in_expected_order(UNALIGNED_LOAD64(p));
static u64 Fetch64(const char* p) {
    return uint64_in_expected_order(unaligned_load64(p));
}

static uint32 Fetch32(const char* p) {
    return uint32_in_expected_order(UNALIGNED_LOAD32(p));
static u32 Fetch32(const char* p) {
    return uint32_in_expected_order(unaligned_load32(p));
}

// Some primes between 2^63 and 2^64 for various uses.
static const uint64 k0 = 0xc3a5c85c97cb3127ULL;
static const uint64 k1 = 0xb492b66fbe98f273ULL;
static const uint64 k2 = 0x9ae16a3b2f90404fULL;
static constexpr u64 k0 = 0xc3a5c85c97cb3127ULL;
static constexpr u64 k1 = 0xb492b66fbe98f273ULL;
static constexpr u64 k2 = 0x9ae16a3b2f90404fULL;

// Bitwise right rotate. Normally this will compile to a single
// instruction, especially if the shift is a manifest constant.
static uint64 Rotate(uint64 val, int shift) {
static u64 Rotate(u64 val, int shift) {
    // Avoid shifting by 64: doing so yields an undefined result.
    return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

static uint64 ShiftMix(uint64 val) {
static u64 ShiftMix(u64 val) {
    return val ^ (val >> 47);
}

static uint64 HashLen16(uint64 u, uint64 v) {
    return Hash128to64(uint128(u, v));
static u64 HashLen16(u64 u, u64 v) {
    return Hash128to64(u128{u, v});
}

static uint64 HashLen16(uint64 u, uint64 v, uint64 mul) {
static u64 HashLen16(u64 u, u64 v, u64 mul) {
    // Murmur-inspired hashing.
    uint64 a = (u ^ v) * mul;
    u64 a = (u ^ v) * mul;
    a ^= (a >> 47);
    uint64 b = (v ^ a) * mul;
    u64 b = (v ^ a) * mul;
    b ^= (b >> 47);
    b *= mul;
    return b;
}

static uint64 HashLen0to16(const char* s, std::size_t len) {
static u64 HashLen0to16(const char* s, size_t len) {
    if (len >= 8) {
        uint64 mul = k2 + len * 2;
        uint64 a = Fetch64(s) + k2;
        uint64 b = Fetch64(s + len - 8);
        uint64 c = Rotate(b, 37) * mul + a;
        uint64 d = (Rotate(a, 25) + b) * mul;
        u64 mul = k2 + len * 2;
        u64 a = Fetch64(s) + k2;
        u64 b = Fetch64(s + len - 8);
        u64 c = Rotate(b, 37) * mul + a;
        u64 d = (Rotate(a, 25) + b) * mul;
        return HashLen16(c, d, mul);
    }
    if (len >= 4) {
        uint64 mul = k2 + len * 2;
        uint64 a = Fetch32(s);
        u64 mul = k2 + len * 2;
        u64 a = Fetch32(s);
        return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
    }
    if (len > 0) {
        uint8 a = s[0];
        uint8 b = s[len >> 1];
        uint8 c = s[len - 1];
        uint32 y = static_cast<uint32>(a) + (static_cast<uint32>(b) << 8);
        uint32 z = static_cast<uint32>(len) + (static_cast<uint32>(c) << 2);
        u8 a = s[0];
        u8 b = s[len >> 1];
        u8 c = s[len - 1];
        u32 y = static_cast<u32>(a) + (static_cast<u32>(b) << 8);
        u32 z = static_cast<u32>(len) + (static_cast<u32>(c) << 2);
        return ShiftMix(y * k2 ^ z * k0) * k2;
    }
    return k2;
@@ -141,22 +139,21 @@ static uint64 HashLen0to16(const char* s, std::size_t len) {

// This probably works well for 16-byte strings as well, but it may be overkill
// in that case.
static uint64 HashLen17to32(const char* s, std::size_t len) {
    uint64 mul = k2 + len * 2;
    uint64 a = Fetch64(s) * k1;
    uint64 b = Fetch64(s + 8);
    uint64 c = Fetch64(s + len - 8) * mul;
    uint64 d = Fetch64(s + len - 16) * k2;
static u64 HashLen17to32(const char* s, size_t len) {
    u64 mul = k2 + len * 2;
    u64 a = Fetch64(s) * k1;
    u64 b = Fetch64(s + 8);
    u64 c = Fetch64(s + len - 8) * mul;
    u64 d = Fetch64(s + len - 16) * k2;
    return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d, a + Rotate(b + k2, 18) + c, mul);
}

// Return a 16-byte hash for 48 bytes. Quick and dirty.
// Callers do best to use "random-looking" values for a and b.
static pair<uint64, uint64> WeakHashLen32WithSeeds(uint64 w, uint64 x, uint64 y, uint64 z, uint64 a,
                                                   uint64 b) {
static pair<u64, u64> WeakHashLen32WithSeeds(u64 w, u64 x, u64 y, u64 z, u64 a, u64 b) {
    a += w;
    b = Rotate(b + a + z, 21);
    uint64 c = a;
    u64 c = a;
    a += x;
    a += y;
    b += Rotate(a, 44);
@@ -164,34 +161,34 @@ static pair<uint64, uint64> WeakHashLen32WithSeeds(uint64 w, uint64 x, uint64 y,
}

// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
static pair<uint64, uint64> WeakHashLen32WithSeeds(const char* s, uint64 a, uint64 b) {
static pair<u64, u64> WeakHashLen32WithSeeds(const char* s, u64 a, u64 b) {
    return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16), Fetch64(s + 24), a,
                                  b);
}

// Return an 8-byte hash for 33 to 64 bytes.
static uint64 HashLen33to64(const char* s, std::size_t len) {
    uint64 mul = k2 + len * 2;
    uint64 a = Fetch64(s) * k2;
    uint64 b = Fetch64(s + 8);
    uint64 c = Fetch64(s + len - 24);
    uint64 d = Fetch64(s + len - 32);
    uint64 e = Fetch64(s + 16) * k2;
    uint64 f = Fetch64(s + 24) * 9;
    uint64 g = Fetch64(s + len - 8);
    uint64 h = Fetch64(s + len - 16) * mul;
    uint64 u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
    uint64 v = ((a + g) ^ d) + f + 1;
    uint64 w = swap64((u + v) * mul) + h;
    uint64 x = Rotate(e + f, 42) + c;
    uint64 y = (swap64((v + w) * mul) + g) * mul;
    uint64 z = e + f + c;
static u64 HashLen33to64(const char* s, size_t len) {
    u64 mul = k2 + len * 2;
    u64 a = Fetch64(s) * k2;
    u64 b = Fetch64(s + 8);
    u64 c = Fetch64(s + len - 24);
    u64 d = Fetch64(s + len - 32);
    u64 e = Fetch64(s + 16) * k2;
    u64 f = Fetch64(s + 24) * 9;
    u64 g = Fetch64(s + len - 8);
    u64 h = Fetch64(s + len - 16) * mul;
    u64 u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
    u64 v = ((a + g) ^ d) + f + 1;
    u64 w = swap64((u + v) * mul) + h;
    u64 x = Rotate(e + f, 42) + c;
    u64 y = (swap64((v + w) * mul) + g) * mul;
    u64 z = e + f + c;
    a = swap64((x + z) * mul + y) + b;
    b = ShiftMix((z + a) * mul + d + h) * mul;
    return b + x;
}

uint64 CityHash64(const char* s, std::size_t len) {
u64 CityHash64(const char* s, size_t len) {
    if (len <= 32) {
        if (len <= 16) {
            return HashLen0to16(s, len);
|

    // For strings over 64 bytes we hash the end first, and then as we
    // loop we keep 56 bytes of state: v, w, x, y, and z.
    uint64 x = Fetch64(s + len - 40);
    uint64 y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
    uint64 z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
    pair<uint64, uint64> v = WeakHashLen32WithSeeds(s + len - 64, len, z);
    pair<uint64, uint64> w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
    u64 x = Fetch64(s + len - 40);
    u64 y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
    u64 z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
    pair<u64, u64> v = WeakHashLen32WithSeeds(s + len - 64, len, z);
    pair<u64, u64> w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
    x = x * k1 + Fetch64(s);

    // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
    len = (len - 1) & ~static_cast<std::size_t>(63);
    len = (len - 1) & ~static_cast<size_t>(63);
    do {
        x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
        y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
|
||||
HashLen16(v.second, w.second) + x);
|
||||
}
|
||||
|
||||
uint64 CityHash64WithSeed(const char* s, std::size_t len, uint64 seed) {
u64 CityHash64WithSeed(const char* s, size_t len, u64 seed) {
    return CityHash64WithSeeds(s, len, k2, seed);
}

uint64 CityHash64WithSeeds(const char* s, std::size_t len, uint64 seed0, uint64 seed1) {
u64 CityHash64WithSeeds(const char* s, size_t len, u64 seed0, u64 seed1) {
    return HashLen16(CityHash64(s, len) - seed0, seed1);
}
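The seeded variants are thin wrappers: the unseeded 64-bit hash is computed first and the seeds are folded in afterwards with HashLen16, where k2 is an internal constant of cityhash.cpp. A minimal usage sketch under that reading (the payload string is a placeholder):

#include <string>
#include "common/cityhash.h"
#include "common/common_types.h"

u64 HashWithAndWithoutSeed(const std::string& data) {
    const u64 plain = Common::CityHash64(data.data(), data.size());
    // The seed only post-mixes the result; the main hashing pass is identical.
    const u64 seeded = Common::CityHash64WithSeed(data.data(), data.size(), 0x1234ULL);
    return plain ^ seeded;
}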
|
||||
// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
|
||||
// of any length representable in signed long. Based on City and Murmur.
|
||||
static uint128 CityMurmur(const char* s, std::size_t len, uint128 seed) {
|
||||
uint64 a = Uint128Low64(seed);
|
||||
uint64 b = Uint128High64(seed);
|
||||
uint64 c = 0;
|
||||
uint64 d = 0;
|
||||
static u128 CityMurmur(const char* s, size_t len, u128 seed) {
|
||||
u64 a = seed[0];
|
||||
u64 b = seed[1];
|
||||
u64 c = 0;
|
||||
u64 d = 0;
|
||||
signed long l = static_cast<long>(len) - 16;
|
||||
if (l <= 0) { // len <= 16
|
||||
a = ShiftMix(a * k1) * k1;
|
||||
@@ -266,20 +263,20 @@ static uint128 CityMurmur(const char* s, std::size_t len, uint128 seed) {
|
||||
}
|
||||
a = HashLen16(a, c);
|
||||
b = HashLen16(d, b);
|
||||
return uint128(a ^ b, HashLen16(b, a));
|
||||
return u128{a ^ b, HashLen16(b, a)};
|
||||
}
|
||||
|
||||
uint128 CityHash128WithSeed(const char* s, std::size_t len, uint128 seed) {
|
||||
u128 CityHash128WithSeed(const char* s, size_t len, u128 seed) {
|
||||
if (len < 128) {
|
||||
return CityMurmur(s, len, seed);
|
||||
}
|
||||
|
||||
// We expect len >= 128 to be the common case. Keep 56 bytes of state:
|
||||
// v, w, x, y, and z.
|
||||
pair<uint64, uint64> v, w;
|
||||
uint64 x = Uint128Low64(seed);
|
||||
uint64 y = Uint128High64(seed);
|
||||
uint64 z = len * k1;
|
||||
pair<u64, u64> v, w;
|
||||
u64 x = seed[0];
|
||||
u64 y = seed[1];
|
||||
u64 z = len * k1;
|
||||
v.first = Rotate(y ^ k1, 49) * k1 + Fetch64(s);
|
||||
v.second = Rotate(v.first, 42) * k1 + Fetch64(s + 8);
|
||||
w.first = Rotate(y + z, 35) * k1 + x;
|
||||
@@ -313,7 +310,7 @@ uint128 CityHash128WithSeed(const char* s, std::size_t len, uint128 seed) {
|
||||
w.first *= 9;
|
||||
v.first *= k0;
|
||||
// If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
|
||||
for (std::size_t tail_done = 0; tail_done < len;) {
|
||||
for (size_t tail_done = 0; tail_done < len;) {
|
||||
tail_done += 32;
|
||||
y = Rotate(x + y, 42) * k0 + v.second;
|
||||
w.first += Fetch64(s + len - tail_done + 16);
|
||||
@@ -328,13 +325,12 @@ uint128 CityHash128WithSeed(const char* s, std::size_t len, uint128 seed) {
|
||||
// different 56-byte-to-8-byte hashes to get a 16-byte final result.
|
||||
x = HashLen16(x, v.first);
|
||||
y = HashLen16(y + z, w.first);
|
||||
return uint128(HashLen16(x + v.second, w.second) + y, HashLen16(x + w.second, y + v.second));
|
||||
return u128{HashLen16(x + v.second, w.second) + y, HashLen16(x + w.second, y + v.second)};
|
||||
}
|
||||
|
||||
uint128 CityHash128(const char* s, std::size_t len) {
|
||||
return len >= 16
|
||||
? CityHash128WithSeed(s + 16, len - 16, uint128(Fetch64(s), Fetch64(s + 8) + k0))
|
||||
: CityHash128WithSeed(s, len, uint128(k0, k1));
|
||||
u128 CityHash128(const char* s, size_t len) {
|
||||
return len >= 16 ? CityHash128WithSeed(s + 16, len - 16, u128{Fetch64(s), Fetch64(s + 8) + k0})
|
||||
: CityHash128WithSeed(s, len, u128{k0, k1});
|
||||
}
|
||||
|
||||
} // namespace Common
|
||||
|
||||
@@ -62,49 +62,38 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <utility>
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
using uint128 = std::pair<uint64_t, uint64_t>;
|
||||
|
||||
[[nodiscard]] inline uint64_t Uint128Low64(const uint128& x) {
|
||||
return x.first;
|
||||
}
|
||||
[[nodiscard]] inline uint64_t Uint128High64(const uint128& x) {
|
||||
return x.second;
|
||||
}
|
||||
|
||||
// Hash function for a byte array.
|
||||
[[nodiscard]] uint64_t CityHash64(const char* buf, std::size_t len);
|
||||
[[nodiscard]] u64 CityHash64(const char* buf, size_t len);
|
||||
|
||||
// Hash function for a byte array. For convenience, a 64-bit seed is also
|
||||
// hashed into the result.
|
||||
[[nodiscard]] uint64_t CityHash64WithSeed(const char* buf, std::size_t len, uint64_t seed);
|
||||
[[nodiscard]] u64 CityHash64WithSeed(const char* buf, size_t len, u64 seed);
|
||||
|
||||
// Hash function for a byte array. For convenience, two seeds are also
|
||||
// hashed into the result.
|
||||
[[nodiscard]] uint64_t CityHash64WithSeeds(const char* buf, std::size_t len, uint64_t seed0,
|
||||
uint64_t seed1);
|
||||
[[nodiscard]] u64 CityHash64WithSeeds(const char* buf, size_t len, u64 seed0, u64 seed1);
|
||||
|
||||
// Hash function for a byte array.
|
||||
[[nodiscard]] uint128 CityHash128(const char* s, std::size_t len);
|
||||
[[nodiscard]] u128 CityHash128(const char* s, size_t len);
|
||||
|
||||
// Hash function for a byte array. For convenience, a 128-bit seed is also
|
||||
// hashed into the result.
|
||||
[[nodiscard]] uint128 CityHash128WithSeed(const char* s, std::size_t len, uint128 seed);
|
||||
[[nodiscard]] u128 CityHash128WithSeed(const char* s, size_t len, u128 seed);
|
||||
|
||||
// Hash 128 input bits down to 64 bits of output.
// This is intended to be a reasonably good hash function.
[[nodiscard]] inline uint64_t Hash128to64(const uint128& x) {
[[nodiscard]] inline u64 Hash128to64(const u128& x) {
    // Murmur-inspired hashing.
    const uint64_t kMul = 0x9ddfea08eb382d69ULL;
    uint64_t a = (Uint128Low64(x) ^ Uint128High64(x)) * kMul;
    const u64 mul = 0x9ddfea08eb382d69ULL;
    u64 a = (x[0] ^ x[1]) * mul;
    a ^= (a >> 47);
    uint64_t b = (Uint128High64(x) ^ a) * kMul;
    u64 b = (x[1] ^ a) * mul;
    b ^= (b >> 47);
    b *= kMul;
    b *= mul;
    return b;
}
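Taken together, the header now exposes the hashing entry points in terms of the project-wide u64/u128 aliases instead of the local std::pair-based uint128 it used to define. A minimal hedged sketch of the intended use, assuming u128 is indexed as x[0] for the low word and x[1] for the high word as in the code above:

#include <string_view>
#include "common/cityhash.h"
#include "common/common_types.h"

u64 FingerprintName(std::string_view name) {
    // 128-bit hash of the buffer.
    const u128 wide = Common::CityHash128(name.data(), name.size());
    // Murmur-inspired fold of both words down to a single 64-bit value.
    return Common::Hash128to64(wide);
}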
|
||||
|
||||
@@ -52,9 +52,13 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
|
||||
// Generic function to get last error message.
|
||||
// Call directly after the command or use the error num.
|
||||
// This function might change the error code.
|
||||
// Defined in Misc.cpp.
|
||||
// Defined in misc.cpp.
|
||||
[[nodiscard]] std::string GetLastErrorMsg();
|
||||
|
||||
// Like GetLastErrorMsg(), but passing an explicit error code.
|
||||
// Defined in misc.cpp.
|
||||
[[nodiscard]] std::string NativeErrorToString(int e);
|
||||
|
||||
#define DECLARE_ENUM_FLAG_OPERATORS(type) \
|
||||
[[nodiscard]] constexpr type operator|(type a, type b) noexcept { \
|
||||
using T = std::underlying_type_t<type>; \
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
|
||||
namespace Common {
|
||||
|
||||
constexpr std::size_t default_stack_size = 256 * 1024;
|
||||
constexpr std::size_t default_stack_size = 512 * 1024;
|
||||
|
||||
struct Fiber::FiberImpl {
|
||||
FiberImpl() : stack{default_stack_size}, rewind_stack{default_stack_size} {}
|
||||
@@ -116,16 +116,19 @@ void Fiber::Rewind() {
|
||||
boost::context::detail::jump_fcontext(impl->rewind_context, this);
|
||||
}
|
||||
|
||||
void Fiber::YieldTo(std::shared_ptr<Fiber> from, std::shared_ptr<Fiber> to) {
|
||||
ASSERT_MSG(from != nullptr, "Yielding fiber is null!");
|
||||
ASSERT_MSG(to != nullptr, "Next fiber is null!");
|
||||
to->impl->guard.lock();
|
||||
to->impl->previous_fiber = from;
|
||||
auto transfer = boost::context::detail::jump_fcontext(to->impl->context, to.get());
|
||||
ASSERT(from->impl->previous_fiber != nullptr);
|
||||
from->impl->previous_fiber->impl->context = transfer.fctx;
|
||||
from->impl->previous_fiber->impl->guard.unlock();
|
||||
from->impl->previous_fiber.reset();
|
||||
void Fiber::YieldTo(std::weak_ptr<Fiber> weak_from, Fiber& to) {
    to.impl->guard.lock();
    to.impl->previous_fiber = weak_from.lock();

    auto transfer = boost::context::detail::jump_fcontext(to.impl->context, &to);

    // "from" might no longer be valid if the thread was killed
    if (auto from = weak_from.lock()) {
        ASSERT(from->impl->previous_fiber != nullptr);
        from->impl->previous_fiber->impl->context = transfer.fctx;
        from->impl->previous_fiber->impl->guard.unlock();
        from->impl->previous_fiber.reset();
    }
}
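The new signature takes the yielding fiber as a std::weak_ptr and the target by reference, so YieldTo no longer keeps the source fiber alive across the switch; the CpuManager call sites later in this diff follow the same shape. A hedged sketch of the updated call form (both fibers assumed to be owned elsewhere):

#include <memory>
#include "common/fiber.h"

void SwitchFibers(std::shared_ptr<Common::Fiber> from, std::shared_ptr<Common::Fiber> to) {
    // The source converts implicitly to std::weak_ptr; the destination is dereferenced.
    Common::Fiber::YieldTo(from, *to);
}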
|
||||
std::shared_ptr<Fiber> Fiber::ThreadToFiber() {
|
||||
|
||||
@@ -41,7 +41,7 @@ public:
|
||||
|
||||
/// Yields control from Fiber 'from' to Fiber 'to'
|
||||
/// Fiber 'from' must be the currently running fiber.
|
||||
static void YieldTo(std::shared_ptr<Fiber> from, std::shared_ptr<Fiber> to);
|
||||
static void YieldTo(std::weak_ptr<Fiber> weak_from, Fiber& to);
|
||||
[[nodiscard]] static std::shared_ptr<Fiber> ThreadToFiber();
|
||||
|
||||
void SetRewindPoint(std::function<void(void*)>&& rewind_func, void* rewind_param);
|
||||
|
||||
@@ -12,27 +12,41 @@
|
||||
|
||||
#include "common/common_funcs.h"
|
||||
|
||||
// Generic function to get last error message.
|
||||
// Call directly after the command or use the error num.
|
||||
// This function might change the error code.
|
||||
std::string GetLastErrorMsg() {
|
||||
static constexpr std::size_t buff_size = 255;
|
||||
char err_str[buff_size];
|
||||
|
||||
std::string NativeErrorToString(int e) {
|
||||
#ifdef _WIN32
|
||||
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, nullptr, GetLastError(),
|
||||
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), err_str, buff_size, nullptr);
|
||||
return std::string(err_str, buff_size);
|
||||
#elif defined(__GLIBC__) && (_GNU_SOURCE || (_POSIX_C_SOURCE < 200112L && _XOPEN_SOURCE < 600))
|
||||
LPSTR err_str;
|
||||
|
||||
DWORD res = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
|
||||
FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||
nullptr, e, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
|
||||
reinterpret_cast<LPSTR>(&err_str), 1, nullptr);
|
||||
if (!res) {
|
||||
return "(FormatMessageA failed to format error)";
|
||||
}
|
||||
std::string ret(err_str);
|
||||
LocalFree(err_str);
|
||||
return ret;
|
||||
#else
|
||||
char err_str[255];
|
||||
#if defined(__GLIBC__) && (_GNU_SOURCE || (_POSIX_C_SOURCE < 200112L && _XOPEN_SOURCE < 600))
    // Thread safe (GNU-specific)
    const char* str = strerror_r(errno, err_str, buff_size);
    const char* str = strerror_r(e, err_str, sizeof(err_str));
    return std::string(str);
#else
    // Thread safe (XSI-compliant)
    const int success = strerror_r(errno, err_str, buff_size);
    if (success != 0) {
        return {};
    int second_err = strerror_r(e, err_str, sizeof(err_str));
    if (second_err != 0) {
        return "(strerror_r failed to format error)";
    }
    return std::string(err_str);
#endif // GLIBC etc.
#endif // _WIN32
}

std::string GetLastErrorMsg() {
#ifdef _WIN32
    return NativeErrorToString(GetLastError());
#else
    return NativeErrorToString(errno);
#endif
}
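With this split, GetLastErrorMsg() stays a zero-argument convenience wrapper while NativeErrorToString() can format any error code the caller has already captured. A minimal hedged usage sketch (the failing call is a placeholder; the helpers are assumed to be declared in common_funcs.h as shown earlier in this diff):

#include <cerrno>
#include <cstdio>
#include "common/common_funcs.h"

void ReportOpenFailure(const char* path) {
    std::FILE* const file = std::fopen(path, "rb");
    if (file == nullptr) {
        const int saved_errno = errno; // capture before any other call can overwrite it
        std::printf("fopen(%s) failed: %s\n", path, NativeErrorToString(saved_errno).c_str());
        return;
    }
    std::fclose(file);
}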
|
||||
250
src/common/tiny_mt.h
Normal file
@@ -0,0 +1,250 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
// Implementation of TinyMT (Mersenne Twister RNG).
|
||||
// Like Nintendo, we will use the sample parameters.
|
||||
class TinyMT {
|
||||
public:
|
||||
static constexpr std::size_t NumStateWords = 4;
|
||||
|
||||
struct State {
|
||||
std::array<u32, NumStateWords> data{};
|
||||
};
|
||||
|
||||
private:
|
||||
static constexpr u32 ParamMat1 = 0x8F7011EE;
|
||||
static constexpr u32 ParamMat2 = 0xFC78FF1F;
|
||||
static constexpr u32 ParamTmat = 0x3793FDFF;
|
||||
|
||||
static constexpr u32 ParamMult = 0x6C078965;
|
||||
static constexpr u32 ParamPlus = 0x0019660D;
|
||||
static constexpr u32 ParamXor = 0x5D588B65;
|
||||
|
||||
static constexpr u32 TopBitmask = 0x7FFFFFFF;
|
||||
|
||||
static constexpr int MinimumInitIterations = 8;
|
||||
static constexpr int NumDiscardedInitOutputs = 8;
|
||||
|
||||
static constexpr u32 XorByShifted27(u32 value) {
|
||||
return value ^ (value >> 27);
|
||||
}
|
||||
|
||||
static constexpr u32 XorByShifted30(u32 value) {
|
||||
return value ^ (value >> 30);
|
||||
}
|
||||
|
||||
private:
|
||||
State state{};
|
||||
|
||||
private:
|
||||
// Internal API.
|
||||
void FinalizeInitialization() {
|
||||
const u32 state0 = this->state.data[0] & TopBitmask;
|
||||
const u32 state1 = this->state.data[1];
|
||||
const u32 state2 = this->state.data[2];
|
||||
const u32 state3 = this->state.data[3];
|
||||
|
||||
if (state0 == 0 && state1 == 0 && state2 == 0 && state3 == 0) {
|
||||
this->state.data[0] = 'T';
|
||||
this->state.data[1] = 'I';
|
||||
this->state.data[2] = 'N';
|
||||
this->state.data[3] = 'Y';
|
||||
}
|
||||
|
||||
for (int i = 0; i < NumDiscardedInitOutputs; i++) {
|
||||
this->GenerateRandomU32();
|
||||
}
|
||||
}
|
||||
|
||||
u32 GenerateRandomU24() {
|
||||
return (this->GenerateRandomU32() >> 8);
|
||||
}
|
||||
|
||||
static void GenerateInitialValuePlus(TinyMT::State* state, int index, u32 value) {
|
||||
u32& state0 = state->data[(index + 0) % NumStateWords];
|
||||
u32& state1 = state->data[(index + 1) % NumStateWords];
|
||||
u32& state2 = state->data[(index + 2) % NumStateWords];
|
||||
u32& state3 = state->data[(index + 3) % NumStateWords];
|
||||
|
||||
const u32 x = XorByShifted27(state0 ^ state1 ^ state3) * ParamPlus;
|
||||
const u32 y = x + index + value;
|
||||
|
||||
state0 = y;
|
||||
state1 += x;
|
||||
state2 += y;
|
||||
}
|
||||
|
||||
static void GenerateInitialValueXor(TinyMT::State* state, int index) {
|
||||
u32& state0 = state->data[(index + 0) % NumStateWords];
|
||||
u32& state1 = state->data[(index + 1) % NumStateWords];
|
||||
u32& state2 = state->data[(index + 2) % NumStateWords];
|
||||
u32& state3 = state->data[(index + 3) % NumStateWords];
|
||||
|
||||
const u32 x = XorByShifted27(state0 + state1 + state3) * ParamXor;
|
||||
const u32 y = x - index;
|
||||
|
||||
state0 = y;
|
||||
state1 ^= x;
|
||||
state2 ^= y;
|
||||
}
|
||||
|
||||
public:
|
||||
constexpr TinyMT() = default;
|
||||
|
||||
// Public API.
|
||||
|
||||
// Initialization.
|
||||
void Initialize(u32 seed) {
|
||||
this->state.data[0] = seed;
|
||||
this->state.data[1] = ParamMat1;
|
||||
this->state.data[2] = ParamMat2;
|
||||
this->state.data[3] = ParamTmat;
|
||||
|
||||
for (int i = 1; i < MinimumInitIterations; i++) {
|
||||
const u32 mixed = XorByShifted30(this->state.data[(i - 1) % NumStateWords]);
|
||||
this->state.data[i % NumStateWords] ^= mixed * ParamMult + i;
|
||||
}
|
||||
|
||||
this->FinalizeInitialization();
|
||||
}
|
||||
|
||||
void Initialize(const u32* seed, int seed_count) {
|
||||
this->state.data[0] = 0;
|
||||
this->state.data[1] = ParamMat1;
|
||||
this->state.data[2] = ParamMat2;
|
||||
this->state.data[3] = ParamTmat;
|
||||
|
||||
{
|
||||
const int num_init_iterations = std::max(seed_count + 1, MinimumInitIterations) - 1;
|
||||
|
||||
GenerateInitialValuePlus(&this->state, 0, seed_count);
|
||||
|
||||
for (int i = 0; i < num_init_iterations; i++) {
|
||||
GenerateInitialValuePlus(&this->state, (i + 1) % NumStateWords,
|
||||
(i < seed_count) ? seed[i] : 0);
|
||||
}
|
||||
|
||||
for (int i = 0; i < static_cast<int>(NumStateWords); i++) {
|
||||
GenerateInitialValueXor(&this->state,
|
||||
(i + 1 + num_init_iterations) % NumStateWords);
|
||||
}
|
||||
}
|
||||
|
||||
this->FinalizeInitialization();
|
||||
}
|
||||
|
||||
// State management.
|
||||
void GetState(TinyMT::State& out) const {
|
||||
out.data = this->state.data;
|
||||
}
|
||||
|
||||
void SetState(const TinyMT::State& state_) {
|
||||
this->state.data = state_.data;
|
||||
}
|
||||
|
||||
// Random generation.
|
||||
void GenerateRandomBytes(void* dst, std::size_t size) {
|
||||
const uintptr_t start = reinterpret_cast<uintptr_t>(dst);
|
||||
const uintptr_t end = start + size;
|
||||
const uintptr_t aligned_start = Common::AlignUp(start, 4);
|
||||
const uintptr_t aligned_end = Common::AlignDown(end, 4);
|
||||
|
||||
// Make sure we're aligned.
|
||||
if (start < aligned_start) {
|
||||
const u32 rnd = this->GenerateRandomU32();
|
||||
std::memcpy(dst, &rnd, aligned_start - start);
|
||||
}
|
||||
|
||||
// Write as many aligned u32s as we can.
|
||||
{
|
||||
u32* cur_dst = reinterpret_cast<u32*>(aligned_start);
|
||||
u32* const end_dst = reinterpret_cast<u32*>(aligned_end);
|
||||
|
||||
while (cur_dst < end_dst) {
|
||||
*(cur_dst++) = this->GenerateRandomU32();
|
||||
}
|
||||
}
|
||||
|
||||
// Handle any leftover unaligned data.
|
||||
if (aligned_end < end) {
|
||||
const u32 rnd = this->GenerateRandomU32();
|
||||
std::memcpy(reinterpret_cast<void*>(aligned_end), &rnd, end - aligned_end);
|
||||
}
|
||||
}
|
||||
|
||||
u32 GenerateRandomU32() {
|
||||
// Advance state.
|
||||
const u32 x0 =
|
||||
(this->state.data[0] & TopBitmask) ^ this->state.data[1] ^ this->state.data[2];
|
||||
const u32 y0 = this->state.data[3];
|
||||
const u32 x1 = x0 ^ (x0 << 1);
|
||||
const u32 y1 = y0 ^ (y0 >> 1) ^ x1;
|
||||
|
||||
const u32 state0 = this->state.data[1];
|
||||
u32 state1 = this->state.data[2];
|
||||
u32 state2 = x1 ^ (y1 << 10);
|
||||
const u32 state3 = y1;
|
||||
|
||||
if ((y1 & 1) != 0) {
|
||||
state1 ^= ParamMat1;
|
||||
state2 ^= ParamMat2;
|
||||
}
|
||||
|
||||
this->state.data[0] = state0;
|
||||
this->state.data[1] = state1;
|
||||
this->state.data[2] = state2;
|
||||
this->state.data[3] = state3;
|
||||
|
||||
// Temper.
|
||||
const u32 t1 = state0 + (state2 >> 8);
|
||||
u32 t0 = state3 ^ t1;
|
||||
|
||||
if ((t1 & 1) != 0) {
|
||||
t0 ^= ParamTmat;
|
||||
}
|
||||
|
||||
return t0;
|
||||
}
|
||||
|
||||
u64 GenerateRandomU64() {
|
||||
const u32 lo = this->GenerateRandomU32();
|
||||
const u32 hi = this->GenerateRandomU32();
|
||||
return (u64{hi} << 32) | u64{lo};
|
||||
}
|
||||
|
||||
float GenerateRandomF32() {
|
||||
// Floats have 24 bits of mantissa.
|
||||
constexpr u32 MantissaBits = 24;
|
||||
return static_cast<float>(GenerateRandomU24()) * (1.0f / (1U << MantissaBits));
|
||||
}
|
||||
|
||||
    double GenerateRandomF64() {
        // Doubles have 53 bits of mantissa.
        // The straightforward way to generate 53 random bits would be to take 32 bits
        // from the first rnd32() call and 21 from the second. Nintendo does not do that:
        // they take (32 - 5) = 27 bits from the first rnd32() call and (32 - 6) = 26 bits
        // from the second. We do the same, even though there is no clear reason why.
        constexpr u32 MantissaBits = 53;
        constexpr u32 Shift1st = (64 - MantissaBits) / 2;
        constexpr u32 Shift2nd = (64 - MantissaBits) - Shift1st;

        const u32 first = (this->GenerateRandomU32() >> Shift1st);
        const u32 second = (this->GenerateRandomU32() >> Shift2nd);

        return (1.0 * first * (u64{1} << (32 - Shift2nd)) + second) *
               (1.0 / (u64{1} << MantissaBits));
    }
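For reference, the bit budget works out as Shift1st = (64 - 53) / 2 = 5 and Shift2nd = 11 - 5 = 6, so the first draw keeps 32 - 5 = 27 bits and the second 32 - 6 = 26 bits, 53 in total; the first draw is scaled by 2^26 before the two are combined and divided by 2^53. A small compile-time check of that arithmetic, independent of the class above:

#include <cstdint>

constexpr std::uint32_t kMantissaBits = 53;
constexpr std::uint32_t kShift1st = (64 - kMantissaBits) / 2;          // 5 -> 27 bits kept
constexpr std::uint32_t kShift2nd = (64 - kMantissaBits) - kShift1st;  // 6 -> 26 bits kept
static_assert(kShift1st == 5 && kShift2nd == 6);
static_assert((32 - kShift1st) + (32 - kShift2nd) == kMantissaBits);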
|
||||
};
|
||||
|
||||
} // namespace Common
|
||||
@@ -1,71 +0,0 @@
|
||||
// Copyright 2019 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#include <intrin.h>
|
||||
|
||||
#pragma intrinsic(_umul128)
|
||||
#pragma intrinsic(_udiv128)
|
||||
#endif
|
||||
#include <cstring>
|
||||
#include "common/uint128.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
#ifdef _MSC_VER
|
||||
|
||||
u64 MultiplyAndDivide64(u64 a, u64 b, u64 d) {
|
||||
u128 r{};
|
||||
r[0] = _umul128(a, b, &r[1]);
|
||||
u64 remainder;
|
||||
#if _MSC_VER < 1923
|
||||
return udiv128(r[1], r[0], d, &remainder);
|
||||
#else
|
||||
return _udiv128(r[1], r[0], d, &remainder);
|
||||
#endif
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
u64 MultiplyAndDivide64(u64 a, u64 b, u64 d) {
|
||||
const u64 diva = a / d;
|
||||
const u64 moda = a % d;
|
||||
const u64 divb = b / d;
|
||||
const u64 modb = b % d;
|
||||
return diva * b + moda * divb + moda * modb / d;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
u128 Multiply64Into128(u64 a, u64 b) {
|
||||
u128 result;
|
||||
#ifdef _MSC_VER
|
||||
result[0] = _umul128(a, b, &result[1]);
|
||||
#else
|
||||
unsigned __int128 tmp = a;
|
||||
tmp *= b;
|
||||
std::memcpy(&result, &tmp, sizeof(u128));
|
||||
#endif
|
||||
return result;
|
||||
}
|
||||
|
||||
std::pair<u64, u64> Divide128On32(u128 dividend, u32 divisor) {
|
||||
u64 remainder = dividend[0] % divisor;
|
||||
u64 accum = dividend[0] / divisor;
|
||||
if (dividend[1] == 0)
|
||||
return {accum, remainder};
|
||||
// We ignore dividend[1] / divisor as that overflows
|
||||
const u64 first_segment = (dividend[1] % divisor) << 32;
|
||||
accum += (first_segment / divisor) << 32;
|
||||
const u64 second_segment = (first_segment % divisor) << 32;
|
||||
accum += (second_segment / divisor);
|
||||
remainder += second_segment % divisor;
|
||||
if (remainder >= divisor) {
|
||||
accum++;
|
||||
remainder -= divisor;
|
||||
}
|
||||
return {accum, remainder};
|
||||
}
|
||||
|
||||
} // namespace Common
|
||||
@@ -4,19 +4,118 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstring>
|
||||
#include <utility>
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#include <intrin.h>
|
||||
#pragma intrinsic(__umulh)
|
||||
#pragma intrinsic(_umul128)
|
||||
#pragma intrinsic(_udiv128)
|
||||
#else
|
||||
#include <x86intrin.h>
|
||||
#endif
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
// This function multiplies two u64 values and divides the result by a u64 value.
|
||||
[[nodiscard]] u64 MultiplyAndDivide64(u64 a, u64 b, u64 d);
|
||||
[[nodiscard]] static inline u64 MultiplyAndDivide64(u64 a, u64 b, u64 d) {
|
||||
#ifdef _MSC_VER
|
||||
u128 r{};
|
||||
r[0] = _umul128(a, b, &r[1]);
|
||||
u64 remainder;
|
||||
#if _MSC_VER < 1923
|
||||
return udiv128(r[1], r[0], d, &remainder);
|
||||
#else
|
||||
return _udiv128(r[1], r[0], d, &remainder);
|
||||
#endif
|
||||
#else
|
||||
const u64 diva = a / d;
|
||||
const u64 moda = a % d;
|
||||
const u64 divb = b / d;
|
||||
const u64 modb = b % d;
|
||||
return diva * b + moda * divb + moda * modb / d;
|
||||
#endif
|
||||
}
|
||||
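The non-MSVC branch of MultiplyAndDivide64 rests on the decomposition a = (a/d)*d + a%d and b = (b/d)*d + b%d, which gives a*b/d = (a/d)*b + (a%d)*(b/d) + ((a%d)*(b%d))/d exactly in integer arithmetic; the only hazard is overflow of the intermediate products. A small spot check of the identity with throwaway numbers:

// a = 7, b = 9, d = 4: (7/4)*9 + (7%4)*(9/4) + ((7%4)*(9%4))/4 = 9 + 6 + 0 = 15 = (7*9)/4.
static_assert((7 / 4) * 9 + (7 % 4) * (9 / 4) + ((7 % 4) * (9 % 4)) / 4 == (7 * 9) / 4);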
|
||||
// This function multiplies two u64 values and produces a u128 value.
|
||||
[[nodiscard]] u128 Multiply64Into128(u64 a, u64 b);
|
||||
[[nodiscard]] static inline u128 Multiply64Into128(u64 a, u64 b) {
|
||||
u128 result;
|
||||
#ifdef _MSC_VER
|
||||
result[0] = _umul128(a, b, &result[1]);
|
||||
#else
|
||||
unsigned __int128 tmp = a;
|
||||
tmp *= b;
|
||||
std::memcpy(&result, &tmp, sizeof(u128));
|
||||
#endif
|
||||
return result;
|
||||
}
|
||||
|
||||
[[nodiscard]] static inline u64 GetFixedPoint64Factor(u64 numerator, u64 divisor) {
#ifdef __SIZEOF_INT128__
    const auto base = static_cast<unsigned __int128>(numerator) << 64ULL;
    return static_cast<u64>(base / divisor);
#elif defined(_M_X64) || defined(_M_ARM64)
    std::array<u64, 2> r = {0, numerator};
    u64 remainder;
#if _MSC_VER < 1923
    return udiv128(r[1], r[0], divisor, &remainder);
#else
    return _udiv128(r[1], r[0], divisor, &remainder);
#endif
#else
    // This one is a bit more inaccurate.
    return MultiplyAndDivide64(std::numeric_limits<u64>::max(), numerator, divisor);
#endif
}
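GetFixedPoint64Factor computes floor(numerator * 2^64 / divisor), i.e. numerator/divisor as a 0.64 fixed-point fraction when numerator < divisor. The x64 native clock, whose helpers this diff moves out of native_clock.cpp and into this header, uses such factors so that converting tick deltas needs only a 64x64 high multiply instead of a division. A hedged sketch of that pattern (the names and the frequency relationship are assumptions):

#include "common/uint128.h"

// factor = Common::GetFixedPoint64Factor(1'000'000'000, tsc_frequency), computed once,
// assuming tsc_frequency > 1'000'000'000 so the factor fits in 64 bits.
u64 TicksToNs(u64 tick_delta, u64 ns_per_tick_fixed64) {
    // MultiplyHigh returns the top 64 bits of the 128-bit product, i.e.
    // floor(tick_delta * factor / 2^64) == floor(tick_delta * 1e9 / tsc_frequency).
    return Common::MultiplyHigh(tick_delta, ns_per_tick_fixed64);
}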
|
||||
[[nodiscard]] static inline u64 MultiplyHigh(u64 a, u64 b) {
#ifdef __SIZEOF_INT128__
    return (static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b)) >> 64;
#elif defined(_M_X64) || defined(_M_ARM64)
    return __umulh(a, b); // MSVC
#else
    // Generic fallback
    const u64 a_lo = u32(a);
    const u64 a_hi = a >> 32;
    const u64 b_lo = u32(b);
    const u64 b_hi = b >> 32;

    const u64 a_x_b_hi = a_hi * b_hi;
    const u64 a_x_b_mid = a_hi * b_lo;
    const u64 b_x_a_mid = b_hi * a_lo;
    const u64 a_x_b_lo = a_lo * b_lo;

    const u64 carry_bit = (static_cast<u64>(static_cast<u32>(a_x_b_mid)) +
                           static_cast<u64>(static_cast<u32>(b_x_a_mid)) + (a_x_b_lo >> 32)) >>
                          32;

    const u64 multhi = a_x_b_hi + (a_x_b_mid >> 32) + (b_x_a_mid >> 32) + carry_bit;

    return multhi;
#endif
}
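The generic fallback is the schoolbook split: with a = a_hi*2^32 + a_lo and b = b_hi*2^32 + b_lo, the full product is a_hi*b_hi*2^64 + (a_hi*b_lo + b_hi*a_lo)*2^32 + a_lo*b_lo, and carry_bit gathers the carry produced by adding the low halves of the two middle terms and the high half of the low product. A small hedged consistency check against the wide-multiply path on compilers that provide __int128:

#include <cstdint>
#include "common/uint128.h"

#ifdef __SIZEOF_INT128__
bool MultiplyHighMatchesWideMultiply(std::uint64_t a, std::uint64_t b) {
    const unsigned __int128 wide = static_cast<unsigned __int128>(a) * b;
    return Common::MultiplyHigh(a, b) == static_cast<std::uint64_t>(wide >> 64);
}
#endif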
|
||||
// This function divides a u128 by a u32 value and produces two u64 values:
// the result of division and the remainder
[[nodiscard]] std::pair<u64, u64> Divide128On32(u128 dividend, u32 divisor);
[[nodiscard]] static inline std::pair<u64, u64> Divide128On32(u128 dividend, u32 divisor) {
    u64 remainder = dividend[0] % divisor;
    u64 accum = dividend[0] / divisor;
    if (dividend[1] == 0)
        return {accum, remainder};
    // We ignore dividend[1] / divisor as that overflows
    const u64 first_segment = (dividend[1] % divisor) << 32;
    accum += (first_segment / divisor) << 32;
    const u64 second_segment = (first_segment % divisor) << 32;
    accum += (second_segment / divisor);
    remainder += second_segment % divisor;
    if (remainder >= divisor) {
        accum++;
        remainder -= divisor;
    }
    return {accum, remainder};
}
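Divide128On32 is a two-step long division in base 2^32: the low word is divided directly, then the remainder of the high word is brought down 32 bits at a time (first_segment, second_segment) so no intermediate value overflows 64 bits; the contribution of dividend[1] / divisor itself is deliberately ignored because it would overflow the 64-bit result, as the comment notes. A minimal hedged usage sketch, with dividend[0] as the low word and dividend[1] as the high word:

#include <utility>
#include "common/uint128.h"

std::pair<u64, u64> SplitTicks() {
    // value = 3 * 2^64 + 0x2'0000'0000; the high word is well below the divisor.
    const u128 dividend{0x200000000ULL, 3ULL};
    return Common::Divide128On32(dividend, 1000u); // {quotient, remainder}
}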
|
||||
} // namespace Common
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include "common/uint128.h"
|
||||
#include "common/wall_clock.h"
|
||||
|
||||
|
||||
@@ -8,68 +8,10 @@
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#include <intrin.h>
|
||||
|
||||
#pragma intrinsic(__umulh)
|
||||
#pragma intrinsic(_udiv128)
|
||||
#else
|
||||
#include <x86intrin.h>
|
||||
#endif
|
||||
|
||||
#include "common/atomic_ops.h"
|
||||
#include "common/uint128.h"
|
||||
#include "common/x64/native_clock.h"
|
||||
|
||||
namespace {
|
||||
|
||||
[[nodiscard]] u64 GetFixedPoint64Factor(u64 numerator, u64 divisor) {
|
||||
#ifdef __SIZEOF_INT128__
|
||||
const auto base = static_cast<unsigned __int128>(numerator) << 64ULL;
|
||||
return static_cast<u64>(base / divisor);
|
||||
#elif defined(_M_X64) || defined(_M_ARM64)
|
||||
std::array<u64, 2> r = {0, numerator};
|
||||
u64 remainder;
|
||||
#if _MSC_VER < 1923
|
||||
return udiv128(r[1], r[0], divisor, &remainder);
|
||||
#else
|
||||
return _udiv128(r[1], r[0], divisor, &remainder);
|
||||
#endif
|
||||
#else
|
||||
// This one is a bit more inaccurate.
|
||||
return MultiplyAndDivide64(std::numeric_limits<u64>::max(), numerator, divisor);
|
||||
#endif
|
||||
}
|
||||
|
||||
[[nodiscard]] u64 MultiplyHigh(u64 a, u64 b) {
|
||||
#ifdef __SIZEOF_INT128__
|
||||
return (static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b)) >> 64;
|
||||
#elif defined(_M_X64) || defined(_M_ARM64)
|
||||
return __umulh(a, b); // MSVC
|
||||
#else
|
||||
// Generic fallback
|
||||
const u64 a_lo = u32(a);
|
||||
const u64 a_hi = a >> 32;
|
||||
const u64 b_lo = u32(b);
|
||||
const u64 b_hi = b >> 32;
|
||||
|
||||
const u64 a_x_b_hi = a_hi * b_hi;
|
||||
const u64 a_x_b_mid = a_hi * b_lo;
|
||||
const u64 b_x_a_mid = b_hi * a_lo;
|
||||
const u64 a_x_b_lo = a_lo * b_lo;
|
||||
|
||||
const u64 carry_bit = (static_cast<u64>(static_cast<u32>(a_x_b_mid)) +
|
||||
static_cast<u64>(static_cast<u32>(b_x_a_mid)) + (a_x_b_lo >> 32)) >>
|
||||
32;
|
||||
|
||||
const u64 multhi = a_x_b_hi + (a_x_b_mid >> 32) + (b_x_a_mid >> 32) + carry_bit;
|
||||
|
||||
return multhi;
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace Common {
|
||||
|
||||
u64 EstimateRDTSCFrequency() {
|
||||
|
||||
@@ -19,7 +19,6 @@ add_library(core STATIC
|
||||
core.h
|
||||
core_timing.cpp
|
||||
core_timing.h
|
||||
core_timing_util.cpp
|
||||
core_timing_util.h
|
||||
cpu_manager.cpp
|
||||
cpu_manager.h
|
||||
@@ -148,7 +147,7 @@ add_library(core STATIC
|
||||
hle/kernel/client_session.h
|
||||
hle/kernel/code_set.cpp
|
||||
hle/kernel/code_set.h
|
||||
hle/kernel/errors.h
|
||||
hle/kernel/svc_results.h
|
||||
hle/kernel/global_scheduler_context.cpp
|
||||
hle/kernel/global_scheduler_context.h
|
||||
hle/kernel/handle_table.cpp
|
||||
@@ -157,6 +156,8 @@ add_library(core STATIC
|
||||
hle/kernel/hle_ipc.h
|
||||
hle/kernel/k_address_arbiter.cpp
|
||||
hle/kernel/k_address_arbiter.h
|
||||
hle/kernel/k_address_space_info.cpp
|
||||
hle/kernel/k_address_space_info.h
|
||||
hle/kernel/k_affinity_mask.h
|
||||
hle/kernel/k_condition_variable.cpp
|
||||
hle/kernel/k_condition_variable.h
|
||||
@@ -165,6 +166,18 @@ add_library(core STATIC
|
||||
hle/kernel/k_light_condition_variable.h
|
||||
hle/kernel/k_light_lock.cpp
|
||||
hle/kernel/k_light_lock.h
|
||||
hle/kernel/k_memory_block.h
|
||||
hle/kernel/k_memory_block_manager.cpp
|
||||
hle/kernel/k_memory_block_manager.h
|
||||
hle/kernel/k_memory_layout.h
|
||||
hle/kernel/k_memory_manager.cpp
|
||||
hle/kernel/k_memory_manager.h
|
||||
hle/kernel/k_page_bitmap.h
|
||||
hle/kernel/k_page_heap.cpp
|
||||
hle/kernel/k_page_heap.h
|
||||
hle/kernel/k_page_linked_list.h
|
||||
hle/kernel/k_page_table.cpp
|
||||
hle/kernel/k_page_table.h
|
||||
hle/kernel/k_priority_queue.h
|
||||
hle/kernel/k_readable_event.cpp
|
||||
hle/kernel/k_readable_event.h
|
||||
@@ -174,9 +187,17 @@ add_library(core STATIC
|
||||
hle/kernel/k_scheduler.h
|
||||
hle/kernel/k_scheduler_lock.h
|
||||
hle/kernel/k_scoped_lock.h
|
||||
hle/kernel/k_scoped_resource_reservation.h
|
||||
hle/kernel/k_scoped_scheduler_lock_and_sleep.h
|
||||
hle/kernel/k_shared_memory.cpp
|
||||
hle/kernel/k_shared_memory.h
|
||||
hle/kernel/k_slab_heap.h
|
||||
hle/kernel/k_spin_lock.cpp
|
||||
hle/kernel/k_spin_lock.h
|
||||
hle/kernel/k_synchronization_object.cpp
|
||||
hle/kernel/k_synchronization_object.h
|
||||
hle/kernel/k_system_control.cpp
|
||||
hle/kernel/k_system_control.h
|
||||
hle/kernel/k_thread.cpp
|
||||
hle/kernel/k_thread.h
|
||||
hle/kernel/k_thread_queue.h
|
||||
@@ -184,23 +205,7 @@ add_library(core STATIC
|
||||
hle/kernel/k_writable_event.h
|
||||
hle/kernel/kernel.cpp
|
||||
hle/kernel/kernel.h
|
||||
hle/kernel/memory/address_space_info.cpp
|
||||
hle/kernel/memory/address_space_info.h
|
||||
hle/kernel/memory/memory_block.h
|
||||
hle/kernel/memory/memory_block_manager.cpp
|
||||
hle/kernel/memory/memory_block_manager.h
|
||||
hle/kernel/memory/memory_layout.h
|
||||
hle/kernel/memory/memory_manager.cpp
|
||||
hle/kernel/memory/memory_manager.h
|
||||
hle/kernel/memory/memory_types.h
|
||||
hle/kernel/memory/page_linked_list.h
|
||||
hle/kernel/memory/page_heap.cpp
|
||||
hle/kernel/memory/page_heap.h
|
||||
hle/kernel/memory/page_table.cpp
|
||||
hle/kernel/memory/page_table.h
|
||||
hle/kernel/memory/slab_heap.h
|
||||
hle/kernel/memory/system_control.cpp
|
||||
hle/kernel/memory/system_control.h
|
||||
hle/kernel/memory_types.h
|
||||
hle/kernel/object.cpp
|
||||
hle/kernel/object.h
|
||||
hle/kernel/physical_core.cpp
|
||||
@@ -218,12 +223,9 @@ add_library(core STATIC
|
||||
hle/kernel/service_thread.h
|
||||
hle/kernel/session.cpp
|
||||
hle/kernel/session.h
|
||||
hle/kernel/shared_memory.cpp
|
||||
hle/kernel/shared_memory.h
|
||||
hle/kernel/svc.cpp
|
||||
hle/kernel/svc.h
|
||||
hle/kernel/svc_common.h
|
||||
hle/kernel/svc_results.h
|
||||
hle/kernel/svc_types.h
|
||||
hle/kernel/svc_wrap.h
|
||||
hle/kernel/time_manager.cpp
|
||||
@@ -266,6 +268,7 @@ add_library(core STATIC
|
||||
hle/service/am/applets/software_keyboard.h
|
||||
hle/service/am/applets/web_browser.cpp
|
||||
hle/service/am/applets/web_browser.h
|
||||
hle/service/am/applets/web_types.h
|
||||
hle/service/am/idle.cpp
|
||||
hle/service/am/idle.h
|
||||
hle/service/am/omm.cpp
|
||||
@@ -400,6 +403,7 @@ add_library(core STATIC
|
||||
hle/service/hid/controllers/xpad.h
|
||||
hle/service/lbl/lbl.cpp
|
||||
hle/service/lbl/lbl.h
|
||||
hle/service/ldn/errors.h
|
||||
hle/service/ldn/ldn.cpp
|
||||
hle/service/ldn/ldn.h
|
||||
hle/service/ldr/ldr.cpp
|
||||
@@ -653,6 +657,8 @@ else()
|
||||
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
|
||||
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
|
||||
|
||||
$<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
|
||||
|
||||
-Wno-sign-conversion
|
||||
)
|
||||
endif()
|
||||
|
||||
@@ -114,18 +114,17 @@ public:
|
||||
static constexpr u64 minimum_run_cycles = 1000U;
|
||||
};
|
||||
|
||||
std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable& page_table,
|
||||
std::size_t address_space_bits) const {
|
||||
std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable* page_table) const {
|
||||
Dynarmic::A32::UserConfig config;
|
||||
config.callbacks = cb.get();
|
||||
// TODO(bunnei): Implement page table for 32-bit
|
||||
// config.page_table = &page_table.pointers;
|
||||
config.coprocessors[15] = cp15;
|
||||
config.define_unpredictable_behaviour = true;
|
||||
static constexpr std::size_t PAGE_BITS = 12;
|
||||
static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
|
||||
config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
|
||||
page_table.pointers.data());
|
||||
if (page_table) {
|
||||
config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
|
||||
page_table->pointers.data());
|
||||
}
|
||||
config.absolute_offset_page_table = true;
|
||||
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
|
||||
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
|
||||
@@ -201,7 +200,8 @@ ARM_Dynarmic_32::ARM_Dynarmic_32(System& system, CPUInterrupts& interrupt_handle
|
||||
: ARM_Interface{system, interrupt_handlers, uses_wall_clock},
|
||||
cb(std::make_unique<DynarmicCallbacks32>(*this)),
|
||||
cp15(std::make_shared<DynarmicCP15>(*this)), core_index{core_index},
|
||||
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
|
||||
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)},
|
||||
jit(MakeJit(nullptr)) {}
|
||||
|
||||
ARM_Dynarmic_32::~ARM_Dynarmic_32() = default;
|
||||
|
||||
@@ -256,9 +256,6 @@ void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
Dynarmic::A32::Context context;
|
||||
jit->SaveContext(context);
|
||||
ctx.cpu_registers = context.Regs();
|
||||
@@ -268,9 +265,6 @@ void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
Dynarmic::A32::Context context;
|
||||
context.Regs() = ctx.cpu_registers;
|
||||
context.ExtRegs() = ctx.extension_registers;
|
||||
@@ -284,35 +278,31 @@ void ARM_Dynarmic_32::PrepareReschedule() {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_32::ClearInstructionCache() {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->ClearCache();
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->InvalidateCacheRange(static_cast<u32>(addr), size);
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_32::ClearExclusiveState() {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->ClearExclusiveState();
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
|
||||
std::size_t new_address_space_size_in_bits) {
|
||||
ThreadContext32 ctx{};
|
||||
SaveContext(ctx);
|
||||
|
||||
auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
|
||||
auto iter = jit_cache.find(key);
|
||||
if (iter != jit_cache.end()) {
|
||||
jit = iter->second;
|
||||
LoadContext(ctx);
|
||||
return;
|
||||
}
|
||||
jit = MakeJit(page_table, new_address_space_size_in_bits);
|
||||
jit = MakeJit(&page_table);
|
||||
LoadContext(ctx);
|
||||
jit_cache.emplace(key, jit);
|
||||
}
|
||||
|
||||
|
||||
@@ -68,8 +68,7 @@ public:
|
||||
std::size_t new_address_space_size_in_bits) override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable& page_table,
|
||||
std::size_t address_space_bits) const;
|
||||
std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable* page_table) const;
|
||||
|
||||
using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
|
||||
using JitCacheType =
|
||||
@@ -80,10 +79,10 @@ private:
|
||||
|
||||
std::unique_ptr<DynarmicCallbacks32> cb;
|
||||
JitCacheType jit_cache;
|
||||
std::shared_ptr<Dynarmic::A32::Jit> jit;
|
||||
std::shared_ptr<DynarmicCP15> cp15;
|
||||
std::size_t core_index;
|
||||
DynarmicExclusiveMonitor& exclusive_monitor;
|
||||
std::shared_ptr<Dynarmic::A32::Jit> jit;
|
||||
};
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -142,7 +142,7 @@ public:
|
||||
static constexpr u64 minimum_run_cycles = 1000U;
|
||||
};
|
||||
|
||||
std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& page_table,
|
||||
std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable* page_table,
|
||||
std::size_t address_space_bits) const {
|
||||
Dynarmic::A64::UserConfig config;
|
||||
|
||||
@@ -150,13 +150,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
|
||||
config.callbacks = cb.get();
|
||||
|
||||
// Memory
|
||||
config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
|
||||
config.page_table_address_space_bits = address_space_bits;
|
||||
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
|
||||
config.silently_mirror_page_table = false;
|
||||
config.absolute_offset_page_table = true;
|
||||
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
|
||||
config.only_detect_misalignment_via_page_table_on_page_boundary = true;
|
||||
if (page_table) {
|
||||
config.page_table = reinterpret_cast<void**>(page_table->pointers.data());
|
||||
config.page_table_address_space_bits = address_space_bits;
|
||||
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
|
||||
config.silently_mirror_page_table = false;
|
||||
config.absolute_offset_page_table = true;
|
||||
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
|
||||
config.only_detect_misalignment_via_page_table_on_page_boundary = true;
|
||||
}
|
||||
|
||||
// Multi-process state
|
||||
config.processor_id = core_index;
|
||||
@@ -237,7 +239,8 @@ ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, CPUInterrupts& interrupt_handle
|
||||
std::size_t core_index)
|
||||
: ARM_Interface{system, interrupt_handlers, uses_wall_clock},
|
||||
cb(std::make_unique<DynarmicCallbacks64>(*this)), core_index{core_index},
|
||||
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
|
||||
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)},
|
||||
jit(MakeJit(nullptr, 48)) {}
|
||||
|
||||
ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
|
||||
|
||||
@@ -294,9 +297,6 @@ void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
ctx.cpu_registers = jit->GetRegisters();
|
||||
ctx.sp = jit->GetSP();
|
||||
ctx.pc = jit->GetPC();
|
||||
@@ -308,9 +308,6 @@ void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->SetRegisters(ctx.cpu_registers);
|
||||
jit->SetSP(ctx.sp);
|
||||
jit->SetPC(ctx.pc);
|
||||
@@ -326,35 +323,31 @@ void ARM_Dynarmic_64::PrepareReschedule() {
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::ClearInstructionCache() {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->ClearCache();
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->InvalidateCacheRange(addr, size);
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::ClearExclusiveState() {
|
||||
if (!jit) {
|
||||
return;
|
||||
}
|
||||
jit->ClearExclusiveState();
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
|
||||
std::size_t new_address_space_size_in_bits) {
|
||||
ThreadContext64 ctx{};
|
||||
SaveContext(ctx);
|
||||
|
||||
auto key = std::make_pair(&page_table, new_address_space_size_in_bits);
|
||||
auto iter = jit_cache.find(key);
|
||||
if (iter != jit_cache.end()) {
|
||||
jit = iter->second;
|
||||
LoadContext(ctx);
|
||||
return;
|
||||
}
|
||||
jit = MakeJit(page_table, new_address_space_size_in_bits);
|
||||
jit = MakeJit(&page_table, new_address_space_size_in_bits);
|
||||
LoadContext(ctx);
|
||||
jit_cache.emplace(key, jit);
|
||||
}
|
||||
|
||||
|
||||
@@ -61,7 +61,7 @@ public:
|
||||
std::size_t new_address_space_size_in_bits) override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable& page_table,
|
||||
std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable* page_table,
|
||||
std::size_t address_space_bits) const;
|
||||
|
||||
using JitCacheKey = std::pair<Common::PageTable*, std::size_t>;
|
||||
@@ -71,10 +71,11 @@ private:
|
||||
friend class DynarmicCallbacks64;
|
||||
std::unique_ptr<DynarmicCallbacks64> cb;
|
||||
JitCacheType jit_cache;
|
||||
std::shared_ptr<Dynarmic::A64::Jit> jit;
|
||||
|
||||
std::size_t core_index;
|
||||
DynarmicExclusiveMonitor& exclusive_monitor;
|
||||
|
||||
std::shared_ptr<Dynarmic::A64::Jit> jit;
|
||||
};
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -299,25 +299,17 @@ struct System::Impl {
|
||||
gpu_core->WaitIdle();
|
||||
}
|
||||
|
||||
// Shutdown emulation session
|
||||
services.reset();
|
||||
service_manager.reset();
|
||||
cheat_engine.reset();
|
||||
telemetry_session.reset();
|
||||
|
||||
// Close all CPU/threading state
|
||||
cpu_manager.Shutdown();
|
||||
|
||||
// Shutdown kernel and core timing
|
||||
time_manager.Shutdown();
|
||||
core_timing.Shutdown();
|
||||
kernel.Shutdown();
|
||||
|
||||
// Close app loader
|
||||
app_loader.reset();
|
||||
gpu_core.reset();
|
||||
perf_stats.reset();
|
||||
|
||||
// Clear all applets
|
||||
kernel.Shutdown();
|
||||
applet_manager.ClearAll();
|
||||
|
||||
LOG_DEBUG(Core, "Shutdown OK");
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2+
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/core_timing_util.h"
|
||||
|
||||
#include <cinttypes>
|
||||
#include <limits>
|
||||
#include "common/logging/log.h"
|
||||
#include "common/uint128.h"
|
||||
#include "core/hardware_properties.h"
|
||||
|
||||
namespace Core::Timing {
|
||||
|
||||
constexpr u64 MAX_VALUE_TO_MULTIPLY = std::numeric_limits<s64>::max() / Hardware::BASE_CLOCK_RATE;
|
||||
|
||||
s64 msToCycles(std::chrono::milliseconds ms) {
|
||||
if (static_cast<u64>(ms.count() / 1000) > MAX_VALUE_TO_MULTIPLY) {
|
||||
LOG_ERROR(Core_Timing, "Integer overflow, use max value");
|
||||
return std::numeric_limits<s64>::max();
|
||||
}
|
||||
if (static_cast<u64>(ms.count()) > MAX_VALUE_TO_MULTIPLY) {
|
||||
LOG_DEBUG(Core_Timing, "Time very big, do rounding");
|
||||
return Hardware::BASE_CLOCK_RATE * (ms.count() / 1000);
|
||||
}
|
||||
return (Hardware::BASE_CLOCK_RATE * ms.count()) / 1000;
|
||||
}
|
||||
|
||||
s64 usToCycles(std::chrono::microseconds us) {
|
||||
if (static_cast<u64>(us.count() / 1000000) > MAX_VALUE_TO_MULTIPLY) {
|
||||
LOG_ERROR(Core_Timing, "Integer overflow, use max value");
|
||||
return std::numeric_limits<s64>::max();
|
||||
}
|
||||
if (static_cast<u64>(us.count()) > MAX_VALUE_TO_MULTIPLY) {
|
||||
LOG_DEBUG(Core_Timing, "Time very big, do rounding");
|
||||
return Hardware::BASE_CLOCK_RATE * (us.count() / 1000000);
|
||||
}
|
||||
return (Hardware::BASE_CLOCK_RATE * us.count()) / 1000000;
|
||||
}
|
||||
|
||||
s64 nsToCycles(std::chrono::nanoseconds ns) {
|
||||
const u128 temporal = Common::Multiply64Into128(ns.count(), Hardware::BASE_CLOCK_RATE);
|
||||
return Common::Divide128On32(temporal, static_cast<u32>(1000000000)).first;
|
||||
}
|
||||
|
||||
u64 msToClockCycles(std::chrono::milliseconds ns) {
|
||||
const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temp, 1000).first;
|
||||
}
|
||||
|
||||
u64 usToClockCycles(std::chrono::microseconds ns) {
|
||||
const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temp, 1000000).first;
|
||||
}
|
||||
|
||||
u64 nsToClockCycles(std::chrono::nanoseconds ns) {
|
||||
const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temp, 1000000000).first;
|
||||
}
|
||||
|
||||
u64 CpuCyclesToClockCycles(u64 ticks) {
|
||||
const u128 temporal = Common::Multiply64Into128(ticks, Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
}
|
||||
|
||||
std::chrono::milliseconds CyclesToMs(s64 cycles) {
|
||||
const u128 temporal = Common::Multiply64Into128(cycles, 1000);
|
||||
u64 ms = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
return std::chrono::milliseconds(ms);
|
||||
}
|
||||
|
||||
std::chrono::nanoseconds CyclesToNs(s64 cycles) {
|
||||
const u128 temporal = Common::Multiply64Into128(cycles, 1000000000);
|
||||
u64 ns = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
return std::chrono::nanoseconds(ns);
|
||||
}
|
||||
|
||||
std::chrono::microseconds CyclesToUs(s64 cycles) {
|
||||
const u128 temporal = Common::Multiply64Into128(cycles, 1000000);
|
||||
u64 us = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
return std::chrono::microseconds(us);
|
||||
}
|
||||
|
||||
} // namespace Core::Timing
|
||||
@@ -1,24 +1,59 @@
|
||||
// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project
|
||||
// Licensed under GPLv2+
|
||||
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/hardware_properties.h"
|
||||
|
||||
namespace Core::Timing {
|
||||
|
||||
s64 msToCycles(std::chrono::milliseconds ms);
|
||||
s64 usToCycles(std::chrono::microseconds us);
|
||||
s64 nsToCycles(std::chrono::nanoseconds ns);
|
||||
u64 msToClockCycles(std::chrono::milliseconds ns);
|
||||
u64 usToClockCycles(std::chrono::microseconds ns);
|
||||
u64 nsToClockCycles(std::chrono::nanoseconds ns);
|
||||
std::chrono::milliseconds CyclesToMs(s64 cycles);
|
||||
std::chrono::nanoseconds CyclesToNs(s64 cycles);
|
||||
std::chrono::microseconds CyclesToUs(s64 cycles);
|
||||
namespace detail {
constexpr u64 CNTFREQ_ADJUSTED = Hardware::CNTFREQ / 1000;
constexpr u64 BASE_CLOCK_RATE_ADJUSTED = Hardware::BASE_CLOCK_RATE / 1000;
} // namespace detail

u64 CpuCyclesToClockCycles(u64 ticks);
[[nodiscard]] constexpr s64 msToCycles(std::chrono::milliseconds ms) {
    return ms.count() * detail::BASE_CLOCK_RATE_ADJUSTED;
}
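The conversions above trade a little precision for simplicity: dividing both rates by 1000 up front keeps every product inside 64 bits, so the 128-bit multiply-and-divide helpers the removed core_timing_util.cpp relied on are no longer needed, at the cost of truncating the rates to 1 kHz granularity. A hedged numeric check, assuming the values these constants are usually given on Switch (BASE_CLOCK_RATE = 1019215872 Hz, CNTFREQ = 19200000 Hz):

#include <chrono>
#include <cstdint>

// Stand-in for Hardware::BASE_CLOCK_RATE / 1000 (assumed value).
constexpr std::int64_t kBaseClockRateAdjusted = 1019215872LL / 1000; // 1019215 cycles per ms

constexpr std::int64_t MsToCyclesSketch(std::chrono::milliseconds ms) {
    return ms.count() * kBaseClockRateAdjusted;
}
static_assert(MsToCyclesSketch(std::chrono::milliseconds{1000}) == 1019215000);
// The exact value would be 1019215872, so the truncation costs 872 cycles per second.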
|
||||
|
||||
[[nodiscard]] constexpr s64 usToCycles(std::chrono::microseconds us) {
|
||||
return us.count() * detail::BASE_CLOCK_RATE_ADJUSTED / 1000;
|
||||
}
|
||||
|
||||
[[nodiscard]] constexpr s64 nsToCycles(std::chrono::nanoseconds ns) {
|
||||
return ns.count() * detail::BASE_CLOCK_RATE_ADJUSTED / 1000000;
|
||||
}
|
||||
|
||||
[[nodiscard]] constexpr u64 msToClockCycles(std::chrono::milliseconds ms) {
|
||||
return static_cast<u64>(ms.count()) * detail::CNTFREQ_ADJUSTED;
|
||||
}
|
||||
|
||||
[[nodiscard]] constexpr u64 usToClockCycles(std::chrono::microseconds us) {
|
||||
return us.count() * detail::CNTFREQ_ADJUSTED / 1000;
|
||||
}
|
||||
|
||||
[[nodiscard]] constexpr u64 nsToClockCycles(std::chrono::nanoseconds ns) {
|
||||
return ns.count() * detail::CNTFREQ_ADJUSTED / 1000000;
|
||||
}
|
||||
|
||||
[[nodiscard]] constexpr u64 CpuCyclesToClockCycles(u64 ticks) {
|
||||
return ticks * detail::CNTFREQ_ADJUSTED / detail::BASE_CLOCK_RATE_ADJUSTED;
|
||||
}
|
||||
|
||||
[[nodiscard]] constexpr std::chrono::milliseconds CyclesToMs(s64 cycles) {
|
||||
return std::chrono::milliseconds(cycles / detail::BASE_CLOCK_RATE_ADJUSTED);
|
||||
}
|
||||
|
||||
[[nodiscard]] constexpr std::chrono::nanoseconds CyclesToNs(s64 cycles) {
|
||||
return std::chrono::nanoseconds(cycles * 1000000 / detail::BASE_CLOCK_RATE_ADJUSTED);
|
||||
}
|
||||
|
||||
[[nodiscard]] constexpr std::chrono::microseconds CyclesToUs(s64 cycles) {
|
||||
return std::chrono::microseconds(cycles * 1000 / detail::BASE_CLOCK_RATE_ADJUSTED);
|
||||
}
|
||||
|
||||
} // namespace Core::Timing
|
||||
|
||||
@@ -148,7 +148,7 @@ void CpuManager::MultiCoreRunSuspendThread() {
|
||||
auto core = kernel.GetCurrentHostThreadID();
|
||||
auto& scheduler = *kernel.CurrentScheduler();
|
||||
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
|
||||
ASSERT(scheduler.ContextSwitchPending());
|
||||
ASSERT(core == kernel.GetCurrentHostThreadID());
|
||||
scheduler.RescheduleCurrentCore();
|
||||
@@ -245,7 +245,7 @@ void CpuManager::SingleCoreRunSuspendThread() {
|
||||
auto core = kernel.GetCurrentHostThreadID();
|
||||
auto& scheduler = *kernel.CurrentScheduler();
|
||||
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
|
||||
ASSERT(scheduler.ContextSwitchPending());
|
||||
ASSERT(core == kernel.GetCurrentHostThreadID());
|
||||
scheduler.RescheduleCurrentCore();
|
||||
@@ -271,7 +271,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
|
||||
scheduler.Unload(scheduler.GetCurrentThread());
|
||||
|
||||
auto& next_scheduler = kernel.Scheduler(current_core);
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext());
|
||||
}
|
||||
|
||||
// May have changed scheduler
|
||||
@@ -363,7 +363,7 @@ void CpuManager::RunThread(std::size_t core) {
|
||||
|
||||
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
|
||||
data.is_running = true;
|
||||
Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
|
||||
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
|
||||
data.is_running = false;
|
||||
data.is_paused = true;
|
||||
data.exit_barrier->Wait();
|
||||
|
||||
@@ -105,8 +105,6 @@ void AESCipher<Key, KeySize>::Transcode(const u8* src, std::size_t size, u8* des
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mbedtls_cipher_finish(context, nullptr, nullptr);
|
||||
}
|
||||
|
||||
template <typename Key, std::size_t KeySize>
|
||||
|
||||
@@ -14,7 +14,7 @@ namespace NgWord1Data {
|
||||
constexpr std::size_t NUMBER_WORD_TXT_FILES = 0x10;
|
||||
|
||||
// Should this archive replacement mysteriously not work on a future game, consider updating.
|
||||
constexpr std::array<u8, 4> VERSION_DAT{0x0, 0x0, 0x0, 0x19}; // 5.1.0 System Version
|
||||
constexpr std::array<u8, 4> VERSION_DAT{0x0, 0x0, 0x0, 0x20}; // 11.0.1 System Version
|
||||
|
||||
constexpr std::array<u8, 30> WORD_TXT{
|
||||
0xFE, 0xFF, 0x00, 0x5E, 0x00, 0x76, 0x00, 0x65, 0x00, 0x72, 0x00, 0x79, 0x00, 0x62, 0x00,
|
||||
@@ -43,7 +43,7 @@ namespace NgWord2Data {
|
||||
constexpr std::size_t NUMBER_AC_NX_FILES = 0x10;
|
||||
|
||||
// Should this archive replacement mysteriously not work on a future game, consider updating.
|
||||
constexpr std::array<u8, 4> VERSION_DAT{0x0, 0x0, 0x0, 0x15}; // 5.1.0 System Version
|
||||
constexpr std::array<u8, 4> VERSION_DAT{0x0, 0x0, 0x0, 0x1A}; // 11.0.1 System Version
|
||||
|
||||
constexpr std::array<u8, 0x2C> AC_NX_DATA{
|
||||
0x1F, 0x8B, 0x08, 0x08, 0xD5, 0x2C, 0x09, 0x5C, 0x04, 0x00, 0x61, 0x63, 0x72, 0x61, 0x77,
|
||||
|
||||
@@ -14,15 +14,15 @@ namespace SystemVersionData {
|
||||
|
||||
constexpr u8 VERSION_MAJOR = 11;
|
||||
constexpr u8 VERSION_MINOR = 0;
|
||||
constexpr u8 VERSION_MICRO = 0;
|
||||
constexpr u8 VERSION_MICRO = 1;
|
||||
|
||||
constexpr u8 REVISION_MAJOR = 5;
|
||||
constexpr u8 REVISION_MAJOR = 1;
|
||||
constexpr u8 REVISION_MINOR = 0;
|
||||
|
||||
constexpr char PLATFORM_STRING[] = "NX";
|
||||
constexpr char VERSION_HASH[] = "34197eba8810e2edd5e9dfcfbde7b340882e856d";
|
||||
constexpr char DISPLAY_VERSION[] = "11.0.0";
|
||||
constexpr char DISPLAY_TITLE[] = "NintendoSDK Firmware for NX 11.0.0-5.0";
|
||||
constexpr char VERSION_HASH[] = "69103fcb2004dace877094c2f8c29e6113be5dbf";
|
||||
constexpr char DISPLAY_VERSION[] = "11.0.1";
|
||||
constexpr char DISPLAY_TITLE[] = "NintendoSDK Firmware for NX 11.0.1-1.0";
|
||||
|
||||
} // namespace SystemVersionData
|
||||
|
||||
|
||||
@@ -31,6 +31,7 @@ struct ControllerParameters {
|
||||
bool allow_dual_joycons{};
|
||||
bool allow_left_joycon{};
|
||||
bool allow_right_joycon{};
|
||||
bool allow_gamecube_controller{};
|
||||
};
|
||||
|
||||
class ControllerApplet {
|
||||
|
||||
@@ -4,11 +4,11 @@
|
||||
|
||||
#include "core/hle/kernel/client_port.h"
|
||||
#include "core/hle/kernel/client_session.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/hle_ipc.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/server_port.h"
|
||||
#include "core/hle/kernel/session.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
@@ -21,7 +21,7 @@ std::shared_ptr<ServerPort> ClientPort::GetServerPort() const {
|
||||
|
||||
ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
|
||||
if (active_sessions >= max_sessions) {
|
||||
return ERR_MAX_CONNECTIONS_REACHED;
|
||||
return ResultMaxConnectionsReached;
|
||||
}
|
||||
active_sessions++;
|
||||
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/hle/kernel/client_session.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/hle_ipc.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
#include "core/hle/kernel/server_session.h"
|
||||
#include "core/hle/kernel/session.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Kernel {
|
||||
@@ -43,7 +43,7 @@ ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
|
||||
Core::Timing::CoreTiming& core_timing) {
|
||||
// Keep ServerSession alive until we're done working with it.
|
||||
if (!parent->Server()) {
|
||||
return ERR_SESSION_CLOSED_BY_REMOTE;
|
||||
return ResultSessionClosedByRemote;
|
||||
}
|
||||
|
||||
// Signal the server session that new data is available
|
||||
|
||||
@@ -1,43 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/result.h"

namespace Kernel {

// Confirmed Switch kernel error codes

constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_TERMINATION_REQUESTED{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_CURRENT_MEMORY{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
constexpr ResultCode ERR_INVALID_THREAD_PRIORITY{ErrorModule::Kernel, 112};
constexpr ResultCode ERR_INVALID_HANDLE{ErrorModule::Kernel, 114};
constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
constexpr ResultCode ERR_CANCELLED{ErrorModule::Kernel, 118};
constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
constexpr ResultCode ERR_BUSY{ErrorModule::Kernel, 122};
constexpr ResultCode ERR_SESSION_CLOSED_BY_REMOTE{ErrorModule::Kernel, 123};
constexpr ResultCode ERR_INVALID_STATE{ErrorModule::Kernel, 125};
constexpr ResultCode ERR_RESERVED_VALUE{ErrorModule::Kernel, 126};
constexpr ResultCode ERR_RESOURCE_LIMIT_EXCEEDED{ErrorModule::Kernel, 132};

} // namespace Kernel
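The call sites in the hunks below switch from the deleted errors.h constants to Result* names pulled in through core/hle/kernel/svc_results.h. That header's contents are not part of this diff, so the following is only a sketch of what the renamed constants presumably look like, pairing the new names used in these hunks with the module and description values from the deleted file above.

// Sketch only: core/hle/kernel/svc_results.h is not shown in this diff.
// Names come from the call sites below; numeric values from the deleted errors.h above.
#pragma once

#include "core/hle/result.h"

namespace Kernel {

constexpr ResultCode ResultMaxConnectionsReached{ErrorModule::Kernel, 7};
constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
constexpr ResultCode ResultHandleTableFull{ErrorModule::Kernel, 105};
constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
constexpr ResultCode ResultSessionClosedByRemote{ErrorModule::Kernel, 123};
constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};

} // namespace Kernel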
@@ -6,12 +6,12 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {
namespace {
@@ -33,7 +33,7 @@ HandleTable::~HandleTable() = default;
ResultCode HandleTable::SetSize(s32 handle_table_size) {
if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
return ERR_OUT_OF_MEMORY;
return ResultOutOfMemory;
}

// Values less than or equal to zero indicate to use the maximum allowable
@@ -53,7 +53,7 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
const u16 slot = next_free_slot;
if (slot >= table_size) {
LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
return ERR_HANDLE_TABLE_FULL;
return ResultHandleTableFull;
}
next_free_slot = generations[slot];

@@ -76,7 +76,7 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
std::shared_ptr<Object> object = GetGeneric(handle);
if (object == nullptr) {
LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
return ERR_INVALID_HANDLE;
return ResultInvalidHandle;
}
return Create(std::move(object));
}
@@ -84,7 +84,7 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
ResultCode HandleTable::Close(Handle handle) {
if (!IsValid(handle)) {
LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
return ERR_INVALID_HANDLE;
return ResultInvalidHandle;
}

const u16 slot = GetSlot(handle);

@@ -14,7 +14,6 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_readable_event.h"
@@ -26,6 +25,7 @@
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/memory.h"

@@ -120,10 +120,10 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
s32 user_value{};
if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
LOG_ERROR(Kernel, "Invalid current memory!");
return Svc::ResultInvalidCurrentMemory;
return ResultInvalidCurrentMemory;
}
if (user_value != value) {
return Svc::ResultInvalidState;
return ResultInvalidState;
}

auto it = thread_tree.nfind_light({addr, -1});
@@ -189,10 +189,10 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32

if (!succeeded) {
LOG_ERROR(Kernel, "Invalid current memory!");
return Svc::ResultInvalidCurrentMemory;
return ResultInvalidCurrentMemory;
}
if (user_value != value) {
return Svc::ResultInvalidState;
return ResultInvalidState;
}

while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
@@ -221,11 +221,11 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return Svc::ResultTerminationRequested;
return ResultTerminationRequested;
}

// Set the synced object.
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
cur_thread->SetSyncedObject(nullptr, ResultTimedOut);

// Read the value from userspace.
s32 user_value{};
@@ -238,19 +238,19 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement

if (!succeeded) {
slp.CancelSleep();
return Svc::ResultInvalidCurrentMemory;
return ResultInvalidCurrentMemory;
}

// Check that the value is less than the specified one.
if (user_value >= value) {
slp.CancelSleep();
return Svc::ResultInvalidState;
return ResultInvalidState;
}

// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
return Svc::ResultTimedOut;
return ResultTimedOut;
}

// Set the arbiter.
@@ -288,29 +288,29 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return Svc::ResultTerminationRequested;
return ResultTerminationRequested;
}

// Set the synced object.
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
cur_thread->SetSyncedObject(nullptr, ResultTimedOut);

// Read the value from userspace.
s32 user_value{};
if (!ReadFromUser(system, &user_value, addr)) {
slp.CancelSleep();
return Svc::ResultInvalidCurrentMemory;
return ResultInvalidCurrentMemory;
}

// Check that the value is equal.
if (value != user_value) {
slp.CancelSleep();
return Svc::ResultInvalidState;
return ResultInvalidState;
}

// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
return Svc::ResultTimedOut;
return ResultTimedOut;
}

// Set the arbiter.

@@ -2,15 +2,12 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#include <array>

#include "common/assert.h"
#include "core/hle/kernel/memory/address_space_info.h"
#include "core/hle/kernel/k_address_space_info.h"

namespace Kernel::Memory {
namespace Kernel {

namespace {

@@ -28,20 +25,20 @@ enum : u64 {
};

// clang-format off
constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{
{ .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = AddressSpaceInfo::Type::Is32Bit, },
{ .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = AddressSpaceInfo::Type::Small64Bit, },
{ .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = AddressSpaceInfo::Type::Heap, },
{ .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = AddressSpaceInfo::Type::Alias, },
{ .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Is32Bit, },
{ .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = AddressSpaceInfo::Type::Small64Bit, },
{ .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Large64Bit, },
{ .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = AddressSpaceInfo::Type::Is32Bit },
{ .bit_width = 39, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Heap, },
{ .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = AddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = Invalid , .size = Size_2_GB , .type = AddressSpaceInfo::Type::Stack, },
constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{
{ .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, },
{ .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, },
{ .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, },
{ .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, },
{ .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, },
{ .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, },
{ .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall },
{ .bit_width = 39, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = Invalid , .size = Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, },
}};
// clang-format on

@@ -49,7 +46,8 @@ constexpr bool IsAllowedIndexForAddress(std::size_t index) {
return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid;
}

using IndexArray = std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>;
using IndexArray =
std::array<std::size_t, static_cast<std::size_t>(KAddressSpaceInfo::Type::Count)>;

constexpr IndexArray AddressSpaceIndices32Bit{
0, 1, 0, 2, 0, 3,
@@ -63,23 +61,23 @@ constexpr IndexArray AddressSpaceIndices39Bit{
9, 8, 8, 10, 12, 11,
};

constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) {
return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
type != AddressSpaceInfo::Type::Stack;
constexpr bool IsAllowed32BitType(KAddressSpaceInfo::Type type) {
return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit &&
type != KAddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) {
return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
type != AddressSpaceInfo::Type::Stack;
constexpr bool IsAllowed36BitType(KAddressSpaceInfo::Type type) {
return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit &&
type != KAddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) {
return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit;
constexpr bool IsAllowed39BitType(KAddressSpaceInfo::Type type) {
return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::MapLarge;
}

} // namespace

u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
const std::size_t index{static_cast<std::size_t>(type)};
switch (width) {
case 32:
@@ -99,7 +97,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
return 0;
}

std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
const std::size_t index{static_cast<std::size_t>(type)};
switch (width) {
case 32:
@@ -116,4 +114,4 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
return 0;
}

} // namespace Kernel::Memory
} // namespace Kernel
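Only the renamed signatures of GetAddressSpaceStart and GetAddressSpaceSize appear above; the bodies of the width cases are truncated by the diff context. Judging from the index arrays and the IsAllowed*BitType helpers in this hunk, each case presumably resolves through the matching index table, roughly as in this sketch (illustrative only, not literal diff content; the helper name is made up):

// Hypothetical sketch of the 32-bit lookup, using only names visible in the hunk above.
u64 GetAddressSpaceStart32(KAddressSpaceInfo::Type type) {
    const std::size_t index = static_cast<std::size_t>(type);
    ASSERT(IsAllowed32BitType(type));
    ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
    return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].address;
}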
@@ -2,20 +2,17 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"

namespace Kernel::Memory {
namespace Kernel {

struct AddressSpaceInfo final {
struct KAddressSpaceInfo final {
enum class Type : u32 {
Is32Bit = 0,
Small64Bit = 1,
Large64Bit = 2,
MapSmall = 0,
MapLarge = 1,
Map39Bit = 2,
Heap = 3,
Stack = 4,
Alias = 5,
@@ -31,4 +28,4 @@ struct AddressSpaceInfo final {
const Type type{};
};

} // namespace Kernel::Memory
} // namespace Kernel
@@ -92,10 +92,10 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
// Write the value to userspace.
if (!WriteToUser(system, addr, std::addressof(next_value))) {
if (next_owner_thread) {
next_owner_thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
}

return Svc::ResultInvalidCurrentMemory;
return ResultInvalidCurrentMemory;
}
}

@@ -114,20 +114,20 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested);
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);

{
// Read the tag from userspace.
u32 test_tag{};
R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
Svc::ResultInvalidCurrentMemory);
ResultInvalidCurrentMemory);

// If the tag isn't the handle (with wait mask), we're done.
R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);

// Get the lock owner thread.
owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
R_UNLESS(owner_thread, Svc::ResultInvalidHandle);
R_UNLESS(owner_thread, ResultInvalidHandle);

// Update the lock.
cur_thread->SetAddressKey(addr, value);
@@ -191,13 +191,13 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
thread_to_close = owner_thread.get();
} else {
// The lock was tagged with a thread that doesn't exist.
thread->SetSyncedObject(nullptr, Svc::ResultInvalidState);
thread->SetSyncedObject(nullptr, ResultInvalidState);
thread->Wakeup();
}
}
} else {
// If the address wasn't accessible, note so.
thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
thread->Wakeup();
}

@@ -263,12 +263,12 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

// Set the synced object.
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
cur_thread->SetSyncedObject(nullptr, ResultTimedOut);

// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return Svc::ResultTerminationRequested;
return ResultTerminationRequested;
}

// Update the value and process for the next owner.
@@ -302,7 +302,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
// Write the value to userspace.
if (!WriteToUser(system, addr, std::addressof(next_value))) {
slp.CancelSleep();
return Svc::ResultInvalidCurrentMemory;
return ResultInvalidCurrentMemory;
}
}

@@ -2,20 +2,17 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"

namespace Kernel::Memory {
namespace Kernel {

enum class MemoryState : u32 {
enum class KMemoryState : u32 {
None = 0,
Mask = 0xFF,
All = ~None,
@@ -97,31 +94,31 @@ enum class MemoryState : u32 {
FlagReferenceCounted | FlagCanDebug,
CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);

static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(MemoryState::Transferred) == 0x015C3C0D);
static_assert(static_cast<u32>(MemoryState::SharedTransferred) == 0x005C380E);
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015);
static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(KMemoryState::Transferred) == 0x015C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransferred) == 0x005C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);

enum class MemoryPermission : u8 {
enum class KMemoryPermission : u8 {
None = 0,
Mask = static_cast<u8>(~None),

@@ -135,9 +132,9 @@ enum class MemoryPermission : u8 {
UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);

enum class MemoryAttribute : u8 {
enum class KMemoryAttribute : u8 {
None = 0x00,
Mask = 0x7F,
All = Mask,
@@ -152,18 +149,18 @@ enum class MemoryAttribute : u8 {
LockedAndIpcLocked = Locked | IpcLocked,
DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);

static_assert((static_cast<u8>(MemoryAttribute::Mask) &
static_cast<u8>(MemoryAttribute::DontCareMask)) == 0);
static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);

struct MemoryInfo {
struct KMemoryInfo {
VAddr addr{};
std::size_t size{};
MemoryState state{};
MemoryPermission perm{};
MemoryAttribute attribute{};
MemoryPermission original_perm{};
KMemoryState state{};
KMemoryPermission perm{};
KMemoryAttribute attribute{};
KMemoryPermission original_perm{};
u16 ipc_lock_count{};
u16 device_use_count{};

@@ -171,9 +168,9 @@ struct MemoryInfo {
return {
addr,
size,
static_cast<Svc::MemoryState>(state & MemoryState::Mask),
static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask),
static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask),
static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
ipc_lock_count,
device_use_count,
};
@@ -196,21 +193,21 @@ struct MemoryInfo {
}
};

class MemoryBlock final {
friend class MemoryBlockManager;
class KMemoryBlock final {
friend class KMemoryBlockManager;

private:
VAddr addr{};
std::size_t num_pages{};
MemoryState state{MemoryState::None};
KMemoryState state{KMemoryState::None};
u16 ipc_lock_count{};
u16 device_use_count{};
MemoryPermission perm{MemoryPermission::None};
MemoryPermission original_perm{MemoryPermission::None};
MemoryAttribute attribute{MemoryAttribute::None};
KMemoryPermission perm{KMemoryPermission::None};
KMemoryPermission original_perm{KMemoryPermission::None};
KMemoryAttribute attribute{KMemoryAttribute::None};

public:
static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) {
static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
if (lhs.GetAddress() < rhs.GetAddress()) {
return -1;
} else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
@@ -221,9 +218,9 @@ public:
}

public:
constexpr MemoryBlock() = default;
constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
MemoryPermission perm_, MemoryAttribute attribute_)
constexpr KMemoryBlock() = default;
constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
KMemoryPermission perm_, KMemoryAttribute attribute_)
: addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}

constexpr VAddr GetAddress() const {
@@ -246,40 +243,40 @@ public:
return GetEndAddress() - 1;
}

constexpr MemoryInfo GetMemoryInfo() const {
constexpr KMemoryInfo GetMemoryInfo() const {
return {
GetAddress(), GetSize(), state, perm,
attribute, original_perm, ipc_lock_count, device_use_count,
};
}

void ShareToDevice(MemoryPermission /*new_perm*/) {
ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared ||
void ShareToDevice(KMemoryPermission /*new_perm*/) {
ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
device_use_count == 0);
attribute |= MemoryAttribute::DeviceShared;
attribute |= KMemoryAttribute::DeviceShared;
const u16 new_use_count{++device_use_count};
ASSERT(new_use_count > 0);
}

void UnshareToDevice(MemoryPermission /*new_perm*/) {
ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared);
void UnshareToDevice(KMemoryPermission /*new_perm*/) {
ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
const u16 prev_use_count{device_use_count--};
ASSERT(prev_use_count > 0);
if (prev_use_count == 1) {
attribute &= ~MemoryAttribute::DeviceShared;
attribute &= ~KMemoryAttribute::DeviceShared;
}
}

private:
constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
MemoryAttribute::IpcLocked |
MemoryAttribute::DeviceShared};
constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
KMemoryAttribute::IpcLocked |
KMemoryAttribute::DeviceShared};
return state == s && perm == p &&
(attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
}

constexpr bool HasSameProperties(const MemoryBlock& rhs) const {
constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
device_use_count == rhs.device_use_count;
@@ -296,25 +293,25 @@ private:
num_pages += count;
}

constexpr void Update(MemoryState new_state, MemoryPermission new_perm,
MemoryAttribute new_attribute) {
ASSERT(original_perm == MemoryPermission::None);
ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None);
constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
KMemoryAttribute new_attribute) {
ASSERT(original_perm == KMemoryPermission::None);
ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);

state = new_state;
perm = new_perm;

attribute = static_cast<MemoryAttribute>(
attribute = static_cast<KMemoryAttribute>(
new_attribute |
(attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared)));
(attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
}

constexpr MemoryBlock Split(VAddr split_addr) {
constexpr KMemoryBlock Split(VAddr split_addr) {
ASSERT(GetAddress() < split_addr);
ASSERT(Contains(split_addr));
ASSERT(Common::IsAligned(split_addr, PageSize));

MemoryBlock block;
KMemoryBlock block;
block.addr = addr;
block.num_pages = (split_addr - GetAddress()) / PageSize;
block.state = state;
@@ -330,6 +327,6 @@ private:
return block;
}
};
static_assert(std::is_trivially_destructible<MemoryBlock>::value);
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);

} // namespace Kernel::Memory
} // namespace Kernel
@@ -2,19 +2,19 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/memory_types.h"

namespace Kernel::Memory {
namespace Kernel {

MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr)
KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr, VAddr end_addr)
: start_addr{start_addr}, end_addr{end_addr} {
const u64 num_pages{(end_addr - start_addr) / PageSize};
memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None,
MemoryAttribute::None);
memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
KMemoryPermission::None, KMemoryAttribute::None);
}

MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
auto node{memory_block_tree.begin()};
while (node != end()) {
const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
@@ -26,9 +26,9 @@ MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
return end();
}

VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
std::size_t num_pages, std::size_t align, std::size_t offset,
std::size_t guard_pages) {
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
std::size_t num_pages, std::size_t align,
std::size_t offset, std::size_t guard_pages) {
if (num_pages == 0) {
return {};
}
@@ -41,7 +41,7 @@ VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_nu
break;
}

if (info.state != MemoryState::Free) {
if (info.state != KMemoryState::Free) {
continue;
}

@@ -63,17 +63,17 @@ VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_nu
return {};
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
MemoryPermission prev_perm, MemoryAttribute prev_attribute,
MemoryState state, MemoryPermission perm,
MemoryAttribute attribute) {
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attribute) {
const VAddr end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};

prev_attribute |= MemoryAttribute::IpcAndDeviceMapped;
prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;

while (node != memory_block_tree.end()) {
MemoryBlock* block{&(*node)};
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
@@ -106,13 +106,13 @@ void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState p
}
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state,
MemoryPermission perm, MemoryAttribute attribute) {
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attribute) {
const VAddr end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};

while (node != memory_block_tree.end()) {
MemoryBlock* block{&(*node)};
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
@@ -141,13 +141,13 @@ void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState s
}
}

void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
MemoryPermission perm) {
void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
KMemoryPermission perm) {
const VAddr end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};

while (node != memory_block_tree.end()) {
MemoryBlock* block{&(*node)};
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
@@ -176,9 +176,9 @@ void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&
}
}

void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
const_iterator it{FindIterator(start)};
MemoryInfo info{};
KMemoryInfo info{};
do {
info = it->GetMemoryInfo();
func(info);
@@ -186,8 +186,8 @@ void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& f
} while (info.addr + info.size - 1 < end - 1 && it != cend());
}

void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
MemoryBlock* block{&(*it)};
void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
KMemoryBlock* block{&(*it)};

auto EraseIt = [&](const iterator it_to_erase) {
if (next_it == it_to_erase) {
@@ -197,7 +197,7 @@ void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
};

if (it != memory_block_tree.begin()) {
MemoryBlock* prev{&(*std::prev(it))};
KMemoryBlock* prev{&(*std::prev(it))};

if (block->HasSameProperties(*prev)) {
const iterator prev_it{std::prev(it)};
@@ -211,7 +211,7 @@ void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
}

if (it != cend()) {
const MemoryBlock* const next{&(*std::next(it))};
const KMemoryBlock* const next{&(*std::next(it))};

if (block->HasSameProperties(*next)) {
block->Add(next->GetNumPages());
@@ -220,4 +220,4 @@ void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
}
}

} // namespace Kernel::Memory
} // namespace Kernel
@@ -8,18 +8,18 @@
#include <list>

#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/k_memory_block.h"

namespace Kernel::Memory {
namespace Kernel {

class MemoryBlockManager final {
class KMemoryBlockManager final {
public:
using MemoryBlockTree = std::list<MemoryBlock>;
using MemoryBlockTree = std::list<KMemoryBlock>;
using iterator = MemoryBlockTree::iterator;
using const_iterator = MemoryBlockTree::const_iterator;

public:
MemoryBlockManager(VAddr start_addr, VAddr end_addr);
KMemoryBlockManager(VAddr start_addr, VAddr end_addr);

iterator end() {
return memory_block_tree.end();
@@ -36,21 +36,22 @@ public:
VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
std::size_t align, std::size_t offset, std::size_t guard_pages);

void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state,
MemoryPermission perm, MemoryAttribute attribute);
void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attribute);

void Update(VAddr addr, std::size_t num_pages, MemoryState state,
MemoryPermission perm = MemoryPermission::None,
MemoryAttribute attribute = MemoryAttribute::None);
void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm = KMemoryPermission::None,
KMemoryAttribute attribute = KMemoryAttribute::None);

using LockFunc = std::function<void(iterator, MemoryPermission)>;
void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, MemoryPermission perm);
using LockFunc = std::function<void(iterator, KMemoryPermission)>;
void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
KMemoryPermission perm);

using IterateFunc = std::function<void(const MemoryInfo&)>;
using IterateFunc = std::function<void(const KMemoryInfo&)>;
void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);

MemoryBlock& FindBlock(VAddr addr) {
KMemoryBlock& FindBlock(VAddr addr) {
return *FindIterator(addr);
}

@@ -63,4 +64,4 @@ private:
MemoryBlockTree memory_block_tree;
};

} // namespace Kernel::Memory
} // namespace Kernel
@@ -7,7 +7,7 @@
#include "common/common_types.h"
#include "core/device_memory.h"

namespace Kernel::Memory {
namespace Kernel {

constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
@@ -27,8 +27,8 @@ constexpr bool IsKernelAddress(VAddr address) {
return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
}

class MemoryRegion final {
friend class MemoryLayout;
class KMemoryRegion final {
friend class KMemoryLayout;

public:
constexpr PAddr StartAddress() const {
@@ -40,29 +40,29 @@ public:
}

private:
constexpr MemoryRegion() = default;
constexpr MemoryRegion(PAddr start_address, PAddr end_address)
constexpr KMemoryRegion() = default;
constexpr KMemoryRegion(PAddr start_address, PAddr end_address)
: start_address{start_address}, end_address{end_address} {}

const PAddr start_address{};
const PAddr end_address{};
};

class MemoryLayout final {
class KMemoryLayout final {
public:
constexpr const MemoryRegion& Application() const {
constexpr const KMemoryRegion& Application() const {
return application;
}

constexpr const MemoryRegion& Applet() const {
constexpr const KMemoryRegion& Applet() const {
return applet;
}

constexpr const MemoryRegion& System() const {
constexpr const KMemoryRegion& System() const {
return system;
}

static constexpr MemoryLayout GetDefaultLayout() {
static constexpr KMemoryLayout GetDefaultLayout() {
constexpr std::size_t application_size{0xcd500000};
constexpr std::size_t applet_size{0x1fb00000};
constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
@@ -76,15 +76,15 @@ public:
}

private:
constexpr MemoryLayout(PAddr application_start_address, std::size_t application_size,
PAddr applet_start_address, std::size_t applet_size,
PAddr system_start_address, std::size_t system_size)
constexpr KMemoryLayout(PAddr application_start_address, std::size_t application_size,
PAddr applet_start_address, std::size_t applet_size,
PAddr system_start_address, std::size_t system_size)
: application{application_start_address, application_size},
applet{applet_start_address, applet_size}, system{system_start_address, system_size} {}

const MemoryRegion application;
const MemoryRegion applet;
const MemoryRegion system;
const KMemoryRegion application;
const KMemoryRegion applet;
const KMemoryRegion system;
};

} // namespace Kernel::Memory
} // namespace Kernel
@@ -8,20 +8,20 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel::Memory {
namespace Kernel {

std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
const auto size{end_address - start_address};

// Calculate metadata sizes
const auto ref_count_size{(size / PageSize) * sizeof(u16)};
const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)};
const auto total_metadata_size{manager_size + page_heap_size};
ASSERT(manager_size <= total_metadata_size);
ASSERT(Common::IsAligned(total_metadata_size, PageSize));
@@ -41,29 +41,30 @@ std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u6
return total_metadata_size;
}

void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
ASSERT(pool < Pool::Count);
managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
}

VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
Direction dir) {
VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages,
u32 option) {
// Early return if we're allocating no pages
if (num_pages == 0) {
return {};
}

// Lock the pool that we're allocating from
const auto [pool, dir] = DecodeOption(option);
const auto pool_index{static_cast<std::size_t>(pool)};
std::lock_guard lock{pool_locks[pool_index]};

// Choose a heap based on our page size request
const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)};

// Loop, trying to iterate from each block
// TODO (bunnei): Support multiple managers
Impl& chosen_manager{managers[pool_index]};
VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};
VAddr allocated_block{chosen_manager.AllocateBlock(heap_index, false)};

// If we failed to allocate, quit now
if (!allocated_block) {
@@ -71,7 +72,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
}

// If we allocated more than we need, free some
const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)};
if (allocated_pages > num_pages) {
chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
}
@@ -79,8 +80,8 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
return allocated_block;
}

ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir) {
ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir) {
ASSERT(page_list.GetNumPages() == 0);

// Early return if we're allocating no pages
@@ -93,9 +94,9 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
std::lock_guard lock{pool_locks[pool_index]};

// Choose a heap based on our page size request
const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)};
if (heap_index < 0) {
return ERR_OUT_OF_MEMORY;
return ResultOutOfMemory;
}

// TODO (bunnei): Support multiple managers
@@ -112,11 +113,11 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa

// Keep allocating until we've allocated all our pages
for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)};

while (num_pages >= pages_per_alloc) {
// Allocate a block
VAddr allocated_block{chosen_manager.AllocateBlock(index)};
VAddr allocated_block{chosen_manager.AllocateBlock(index, false)};
if (!allocated_block) {
break;
}
@@ -140,7 +141,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa

// Only succeed if we allocated as many pages as we wanted
if (num_pages) {
return ERR_OUT_OF_MEMORY;
return ResultOutOfMemory;
}

// We succeeded!
@@ -148,8 +149,8 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
return RESULT_SUCCESS;
}

ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir) {
ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir) {
// Early return if we're freeing no pages
if (!num_pages) {
return RESULT_SUCCESS;
@@ -172,4 +173,4 @@ ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages,
return RESULT_SUCCESS;
}

} // namespace Kernel::Memory
} // namespace Kernel
@@ -6,16 +6,18 @@

#include <array>
#include <mutex>
#include <tuple>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/hle/kernel/k_page_heap.h"
#include "core/hle/result.h"

namespace Kernel::Memory {
namespace Kernel {

class PageLinkedList;
class KPageLinkedList;

class MemoryManager final : NonCopyable {
class KMemoryManager final : NonCopyable {
public:
enum class Pool : u32 {
Application = 0,
@@ -37,29 +39,50 @@ public:
Mask = (0xF << Shift),
};

MemoryManager() = default;
KMemoryManager() = default;

constexpr std::size_t GetSize(Pool pool) const {
return managers[static_cast<std::size_t>(pool)].GetSize();
}

void InitializeManager(Pool pool, u64 start_address, u64 end_address);
VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
Direction dir = Direction::FromFront);
ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,

VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir = Direction::FromFront);
ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
Direction dir = Direction::FromFront);

static constexpr std::size_t MaxManagerCount = 10;

public:
static constexpr u32 EncodeOption(Pool pool, Direction dir) {
return (static_cast<u32>(pool) << static_cast<u32>(Pool::Shift)) |
(static_cast<u32>(dir) << static_cast<u32>(Direction::Shift));
}

static constexpr Pool GetPool(u32 option) {
return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
static_cast<u32>(Pool::Shift));
}

static constexpr Direction GetDirection(u32 option) {
return static_cast<Direction>(
(static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
static_cast<u32>(Direction::Shift));
}

static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {
return std::make_tuple(GetPool(option), GetDirection(option));
}

private:
class Impl final : NonCopyable {
private:
using RefCount = u16;

private:
PageHeap heap;
KPageHeap heap;
Pool pool{};

public:
@@ -67,8 +90,8 @@ private:

std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);

VAddr AllocateBlock(s32 index) {
return heap.AllocateBlock(index);
VAddr AllocateBlock(s32 index, bool random) {
return heap.AllocateBlock(index, random);
}

void Free(VAddr addr, std::size_t num_pages) {
@@ -93,4 +116,4 @@ private:
std::array<Impl, MaxManagerCount> managers;
};

} // namespace Kernel::Memory
} // namespace Kernel
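AllocateAndOpenContinuous now takes a single option word instead of separate pool/direction arguments, and the EncodeOption/DecodeOption helpers above pack and unpack it. An illustrative caller follows; it is not part of the diff, and memory_manager, num_pages and align_pages are placeholder names.

// Illustrative use of the new option word; everything outside the KMemoryManager API is a placeholder.
using Kernel::KMemoryManager;

const u32 option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Application,
                                                KMemoryManager::Direction::FromFront);

// DecodeOption recovers exactly what EncodeOption packed into the word.
const auto [pool, dir] = KMemoryManager::DecodeOption(option);

// The allocator decodes the same option internally to pick the pool and scan direction.
const VAddr block = memory_manager.AllocateAndOpenContinuous(num_pages, align_pages, option);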
src/core/hle/kernel/k_page_bitmap.h (new file, 279 lines)
@@ -0,0 +1,279 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <bit>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/tiny_mt.h"
#include "core/hle/kernel/k_system_control.h"

namespace Kernel {

class KPageBitmap {
private:
class RandomBitGenerator {
private:
Common::TinyMT rng{};
u32 entropy{};
u32 bits_available{};

private:
void RefreshEntropy() {
entropy = rng.GenerateRandomU32();
bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>());
}

bool GenerateRandomBit() {
if (bits_available == 0) {
this->RefreshEntropy();
}

const bool rnd_bit = (entropy & 1) != 0;
entropy >>= 1;
--bits_available;
return rnd_bit;
}

public:
RandomBitGenerator() {
rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
}

std::size_t SelectRandomBit(u64 bitmap) {
u64 selected = 0;

u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2;
u64 cur_mask = (1ULL << cur_num_bits) - 1;

while (cur_num_bits) {
const u64 low = (bitmap >> 0) & cur_mask;
const u64 high = (bitmap >> cur_num_bits) & cur_mask;

bool choose_low;
if (high == 0) {
// If only low val is set, choose low.
choose_low = true;
} else if (low == 0) {
// If only high val is set, choose high.
choose_low = false;
} else {
// If both are set, choose random.
choose_low = this->GenerateRandomBit();
}

// If we chose low, proceed with low.
if (choose_low) {
bitmap = low;
selected += 0;
} else {
bitmap = high;
selected += cur_num_bits;
}

// Proceed.
cur_num_bits /= 2;
cur_mask >>= cur_num_bits;
}

return selected;
}
};

public:
static constexpr std::size_t MaxDepth = 4;

private:
std::array<u64*, MaxDepth> bit_storages{};
RandomBitGenerator rng{};
std::size_t num_bits{};
std::size_t used_depths{};

public:
KPageBitmap() = default;

constexpr std::size_t GetNumBits() const {
return num_bits;
}
constexpr s32 GetHighestDepthIndex() const {
return static_cast<s32>(used_depths) - 1;
}

u64* Initialize(u64* storage, std::size_t size) {
// Initially, everything is un-set.
num_bits = 0;

// Calculate the needed bitmap depth.
used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
ASSERT(used_depths <= MaxDepth);

// Set the bitmap pointers.
for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
bit_storages[depth] = storage;
size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>();
storage += size;
}

return storage;
}

s64 FindFreeBlock(bool random) {
uintptr_t offset = 0;
s32 depth = 0;

if (random) {
do {
const u64 v = bit_storages[depth][offset];
if (v == 0) {
// If depth is bigger than zero, then a previous level indicated a block was
// free.
ASSERT(depth == 0);
return -1;
}
offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v);
++depth;
} while (depth < static_cast<s32>(used_depths));
} else {
do {
const u64 v = bit_storages[depth][offset];
if (v == 0) {
// If depth is bigger than zero, then a previous level indicated a block was
// free.
ASSERT(depth == 0);
return -1;
}
offset = offset * Common::BitSize<u64>() + std::countr_zero(v);
++depth;
} while (depth < static_cast<s32>(used_depths));
}

return static_cast<s64>(offset);
}

void SetBit(std::size_t offset) {
this->SetBit(this->GetHighestDepthIndex(), offset);
num_bits++;
}

void ClearBit(std::size_t offset) {
this->ClearBit(this->GetHighestDepthIndex(), offset);
num_bits--;
}

bool ClearRange(std::size_t offset, std::size_t count) {
s32 depth = this->GetHighestDepthIndex();
u64* bits = bit_storages[depth];
std::size_t bit_ind = offset / Common::BitSize<u64>();
if (count < Common::BitSize<u64>()) {
const std::size_t shift = offset % Common::BitSize<u64>();
ASSERT(shift + count <= Common::BitSize<u64>());
// Check that all the bits are set.
const u64 mask = ((u64(1) << count) - 1) << shift;
u64 v = bits[bit_ind];
if ((v & mask) != mask) {
return false;
}

// Clear the bits.
v &= ~mask;
bits[bit_ind] = v;
if (v == 0) {
this->ClearBit(depth - 1, bit_ind);
}
} else {
ASSERT(offset % Common::BitSize<u64>() == 0);
ASSERT(count % Common::BitSize<u64>() == 0);
// Check that all the bits are set.
std::size_t remaining = count;
std::size_t i = 0;
do {
if (bits[bit_ind + i++] != ~u64(0)) {
return false;
}
remaining -= Common::BitSize<u64>();
} while (remaining > 0);

// Clear the bits.
remaining = count;
i = 0;
do {
bits[bit_ind + i] = 0;
this->ClearBit(depth - 1, bit_ind + i);
i++;
remaining -= Common::BitSize<u64>();
} while (remaining > 0);
}

num_bits -= count;
return true;
}

private:
void SetBit(s32 depth, std::size_t offset) {
while (depth >= 0) {
std::size_t ind = offset / Common::BitSize<u64>();
std::size_t which = offset % Common::BitSize<u64>();
const u64 mask = u64(1) << which;

u64* bit = std::addressof(bit_storages[depth][ind]);
u64 v = *bit;
ASSERT((v & mask) == 0);
*bit = v | mask;
if (v) {
break;
}
offset = ind;
depth--;
}
}

void ClearBit(s32 depth, std::size_t offset) {
while (depth >= 0) {
std::size_t ind = offset / Common::BitSize<u64>();
std::size_t which = offset % Common::BitSize<u64>();
const u64 mask = u64(1) << which;

u64* bit = std::addressof(bit_storages[depth][ind]);
u64 v = *bit;
ASSERT((v & mask) != 0);
v &= ~mask;
|
||||
*bit = v;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr s32 GetRequiredDepth(std::size_t region_size) {
|
||||
s32 depth = 0;
|
||||
while (true) {
|
||||
region_size /= Common::BitSize<u64>();
|
||||
depth++;
|
||||
if (region_size == 0) {
|
||||
return depth;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
|
||||
std::size_t overhead_bits = 0;
|
||||
for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
|
||||
region_size =
|
||||
Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>();
|
||||
overhead_bits += region_size;
|
||||
}
|
||||
return overhead_bits * sizeof(u64);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Kernel
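Note on the structure above: KPageBitmap keeps one u64* array per depth, and a set bit at a shallower depth means the corresponding u64 one level deeper is non-zero, so FindFreeBlock only ever inspects one word per level (picking either the lowest set bit via std::countr_zero or a random set bit via RandomBitGenerator). A minimal standalone sketch of the same two-level summary-bitmap idea is below; it is an illustration of the technique, not the class itself, and all names in it are made up for the example.

```cpp
#include <array>
#include <bit>
#include <cstdint>
#include <cstdio>

// Two-level bitmap: bit i of `summary` is set iff leaf word i is non-zero.
// This mirrors how KPageBitmap descends one u64 per depth in FindFreeBlock.
struct TwoLevelBitmap {
    std::uint64_t summary{};
    std::array<std::uint64_t, 64> leaves{};

    void SetBit(std::size_t index) {
        leaves[index / 64] |= std::uint64_t(1) << (index % 64);
        summary |= std::uint64_t(1) << (index / 64);
    }

    void ClearBit(std::size_t index) {
        std::uint64_t& leaf = leaves[index / 64];
        leaf &= ~(std::uint64_t(1) << (index % 64));
        if (leaf == 0) {
            summary &= ~(std::uint64_t(1) << (index / 64));
        }
    }

    // Returns the index of some set bit, or -1 if the map is empty.
    long long FindSetBit() const {
        if (summary == 0) {
            return -1;
        }
        const std::size_t word = static_cast<std::size_t>(std::countr_zero(summary));
        const std::size_t bit = static_cast<std::size_t>(std::countr_zero(leaves[word]));
        return static_cast<long long>(word * 64 + bit);
    }
};

int main() {
    TwoLevelBitmap map;
    map.SetBit(130);
    map.SetBit(7);
    std::printf("first set bit: %lld\n", map.FindSetBit()); // prints 7
    map.ClearBit(7);
    std::printf("first set bit: %lld\n", map.FindSetBit()); // prints 130
    return 0;
}
```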
|
||||
@@ -2,16 +2,13 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/memory/page_heap.h"
|
||||
#include "core/hle/kernel/k_page_heap.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
namespace Kernel {
|
||||
|
||||
void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
|
||||
void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
|
||||
// Check our assumptions
|
||||
ASSERT(Common::IsAligned((address), PageSize));
|
||||
ASSERT(Common::IsAligned(size, PageSize));
|
||||
@@ -32,11 +29,11 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
|
||||
}
|
||||
}
|
||||
|
||||
VAddr PageHeap::AllocateBlock(s32 index) {
|
||||
VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
|
||||
const std::size_t needed_size{blocks[index].GetSize()};
|
||||
|
||||
for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
|
||||
if (const VAddr addr{blocks[i].PopBlock()}; addr) {
|
||||
if (const VAddr addr{blocks[i].PopBlock(random)}; addr) {
|
||||
if (const std::size_t allocated_size{blocks[i].GetSize()};
|
||||
allocated_size > needed_size) {
|
||||
Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
|
||||
@@ -48,13 +45,13 @@ VAddr PageHeap::AllocateBlock(s32 index) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
void PageHeap::FreeBlock(VAddr block, s32 index) {
|
||||
void KPageHeap::FreeBlock(VAddr block, s32 index) {
|
||||
do {
|
||||
block = blocks[index++].PushBlock(block);
|
||||
} while (block != 0);
|
||||
}
|
||||
|
||||
void PageHeap::Free(VAddr addr, std::size_t num_pages) {
|
||||
void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
|
||||
// Freeing no pages is a no-op
|
||||
if (num_pages == 0) {
|
||||
return;
|
||||
@@ -104,16 +101,16 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
|
||||
}
|
||||
}
|
||||
|
||||
std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) {
|
||||
std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) {
|
||||
std::size_t overhead_size = 0;
|
||||
for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
|
||||
const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
|
||||
const std::size_t next_block_shift{
|
||||
(i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
|
||||
overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
|
||||
overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(
|
||||
region_size, cur_block_shift, next_block_shift);
|
||||
}
|
||||
return Common::AlignUp(overhead_size, PageSize);
|
||||
}
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
} // namespace Kernel
|
||||
src/core/hle/kernel/k_page_heap.h (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <bit>
|
||||
#include <vector>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/k_page_bitmap.h"
|
||||
#include "core/hle/kernel/memory_types.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KPageHeap final : NonCopyable {
|
||||
public:
|
||||
static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
|
||||
const auto target_pages{std::max(num_pages, align_pages)};
|
||||
for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
|
||||
if (target_pages <=
|
||||
(static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return static_cast<s32>(i);
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr s32 GetBlockIndex(std::size_t num_pages) {
|
||||
for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
|
||||
if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr std::size_t GetBlockSize(std::size_t index) {
|
||||
return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
|
||||
}
|
||||
|
||||
static constexpr std::size_t GetBlockNumPages(std::size_t index) {
|
||||
return GetBlockSize(index) / PageSize;
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr std::size_t NumMemoryBlockPageShifts{7};
|
||||
static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
|
||||
0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
|
||||
};
|
||||
|
||||
class Block final : NonCopyable {
|
||||
private:
|
||||
KPageBitmap bitmap;
|
||||
VAddr heap_address{};
|
||||
uintptr_t end_offset{};
|
||||
std::size_t block_shift{};
|
||||
std::size_t next_block_shift{};
|
||||
|
||||
public:
|
||||
Block() = default;
|
||||
|
||||
constexpr std::size_t GetShift() const {
|
||||
return block_shift;
|
||||
}
|
||||
constexpr std::size_t GetNextShift() const {
|
||||
return next_block_shift;
|
||||
}
|
||||
constexpr std::size_t GetSize() const {
|
||||
return static_cast<std::size_t>(1) << GetShift();
|
||||
}
|
||||
constexpr std::size_t GetNumPages() const {
|
||||
return GetSize() / PageSize;
|
||||
}
|
||||
constexpr std::size_t GetNumFreeBlocks() const {
|
||||
return bitmap.GetNumBits();
|
||||
}
|
||||
constexpr std::size_t GetNumFreePages() const {
|
||||
return GetNumFreeBlocks() * GetNumPages();
|
||||
}
|
||||
|
||||
u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
|
||||
u64* bit_storage) {
|
||||
// Set shifts
|
||||
block_shift = bs;
|
||||
next_block_shift = nbs;
|
||||
|
||||
// Align up the address
|
||||
VAddr end{addr + size};
|
||||
const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
|
||||
: (1ULL << block_shift)};
|
||||
addr = Common::AlignDown((addr), align);
|
||||
end = Common::AlignUp((end), align);
|
||||
|
||||
heap_address = addr;
|
||||
end_offset = (end - addr) / (1ULL << block_shift);
|
||||
return bitmap.Initialize(bit_storage, end_offset);
|
||||
}
|
||||
|
||||
VAddr PushBlock(VAddr address) {
|
||||
// Set the bit for the free block
|
||||
std::size_t offset{(address - heap_address) >> GetShift()};
|
||||
bitmap.SetBit(offset);
|
||||
|
||||
// If we have a next shift, try to clear the blocks below and return the address
|
||||
if (GetNextShift()) {
|
||||
const auto diff{1ULL << (GetNextShift() - GetShift())};
|
||||
offset = Common::AlignDown(offset, diff);
|
||||
if (bitmap.ClearRange(offset, diff)) {
|
||||
return heap_address + (offset << GetShift());
|
||||
}
|
||||
}
|
||||
|
||||
// We couldn't coalesce, or we're already as big as possible
|
||||
return 0;
|
||||
}
|
||||
|
||||
VAddr PopBlock(bool random) {
|
||||
// Find a free block
|
||||
const s64 soffset{bitmap.FindFreeBlock(random)};
|
||||
if (soffset < 0) {
|
||||
return 0;
|
||||
}
|
||||
const auto offset{static_cast<std::size_t>(soffset)};
|
||||
|
||||
// Update our tracking and return it
|
||||
bitmap.ClearBit(offset);
|
||||
return heap_address + (offset << GetShift());
|
||||
}
|
||||
|
||||
public:
|
||||
static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
|
||||
std::size_t cur_block_shift,
|
||||
std::size_t next_block_shift) {
|
||||
const auto cur_block_size{(1ULL << cur_block_shift)};
|
||||
const auto next_block_size{(1ULL << next_block_shift)};
|
||||
const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
|
||||
return KPageBitmap::CalculateManagementOverheadSize(
|
||||
(align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
|
||||
}
|
||||
};
|
||||
|
||||
public:
|
||||
KPageHeap() = default;
|
||||
|
||||
constexpr VAddr GetAddress() const {
|
||||
return heap_address;
|
||||
}
|
||||
constexpr std::size_t GetSize() const {
|
||||
return heap_size;
|
||||
}
|
||||
constexpr VAddr GetEndAddress() const {
|
||||
return GetAddress() + GetSize();
|
||||
}
|
||||
constexpr std::size_t GetPageOffset(VAddr block) const {
|
||||
return (block - GetAddress()) / PageSize;
|
||||
}
|
||||
|
||||
void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
|
||||
VAddr AllocateBlock(s32 index, bool random);
|
||||
void Free(VAddr addr, std::size_t num_pages);
|
||||
|
||||
void UpdateUsedSize() {
|
||||
used_size = heap_size - (GetNumFreePages() * PageSize);
|
||||
}
|
||||
|
||||
static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
|
||||
|
||||
private:
|
||||
constexpr std::size_t GetNumFreePages() const {
|
||||
std::size_t num_free{};
|
||||
|
||||
for (const auto& block : blocks) {
|
||||
num_free += block.GetNumFreePages();
|
||||
}
|
||||
|
||||
return num_free;
|
||||
}
|
||||
|
||||
void FreeBlock(VAddr block, s32 index);
|
||||
|
||||
VAddr heap_address{};
|
||||
std::size_t heap_size{};
|
||||
std::size_t used_size{};
|
||||
std::array<Block, NumMemoryBlockPageShifts> blocks{};
|
||||
std::vector<u64> metadata;
|
||||
};
|
||||
|
||||
} // namespace Kernel
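For reference, GetBlockIndex above walks MemoryBlockPageShifts from the largest shift downward and returns the first block size whose page count fits in num_pages, while GetAlignedBlockIndex walks upward to find the smallest block that satisfies both size and alignment. A hedged standalone sketch of that index math using the same shift table follows; PageSize is assumed to be 0x1000 here, as elsewhere in this code.

```cpp
#include <array>
#include <cstddef>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000; // 4 KiB, assumed page size
constexpr std::array<std::size_t, 7> MemoryBlockPageShifts{0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E};

// Largest block whose page count does not exceed num_pages (mirrors KPageHeap::GetBlockIndex).
constexpr int GetBlockIndex(std::size_t num_pages) {
    for (int i = static_cast<int>(MemoryBlockPageShifts.size()) - 1; i >= 0; i--) {
        if (num_pages >= (std::size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
            return i;
        }
    }
    return -1;
}

int main() {
    // One 4 KiB page maps to index 0 (shift 0xC); 16 pages (64 KiB) maps to index 1 (shift 0x10).
    std::printf("%d %d\n", GetBlockIndex(1), GetBlockIndex(16));
    return 0;
}
```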
|
||||
@@ -8,12 +8,12 @@
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/memory/memory_types.h"
|
||||
#include "core/hle/kernel/memory_types.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
namespace Kernel {
|
||||
|
||||
class PageLinkedList final {
|
||||
class KPageLinkedList final {
|
||||
public:
|
||||
class Node final {
|
||||
public:
|
||||
@@ -33,8 +33,8 @@ public:
|
||||
};
|
||||
|
||||
public:
|
||||
PageLinkedList() = default;
|
||||
PageLinkedList(u64 address, u64 num_pages) {
|
||||
KPageLinkedList() = default;
|
||||
KPageLinkedList(u64 address, u64 num_pages) {
|
||||
ASSERT(AddBlock(address, num_pages).IsSuccess());
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ public:
|
||||
return num_pages;
|
||||
}
|
||||
|
||||
bool IsEqual(PageLinkedList& other) const {
|
||||
bool IsEqual(KPageLinkedList& other) const {
|
||||
auto this_node = nodes.begin();
|
||||
auto other_node = other.nodes.begin();
|
||||
while (this_node != nodes.end() && other_node != other.nodes.end()) {
|
||||
@@ -89,4 +89,4 @@ private:
|
||||
std::list<Node> nodes;
|
||||
};
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
} // namespace Kernel
|
||||
File diff suppressed because it is too large
@@ -10,27 +10,27 @@
|
||||
#include "common/common_types.h"
|
||||
#include "common/page_table.h"
|
||||
#include "core/file_sys/program_metadata.h"
|
||||
#include "core/hle/kernel/memory/memory_block.h"
|
||||
#include "core/hle/kernel/memory/memory_manager.h"
|
||||
#include "core/hle/kernel/k_memory_block.h"
|
||||
#include "core/hle/kernel/k_memory_manager.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
}
|
||||
|
||||
namespace Kernel::Memory {
|
||||
namespace Kernel {
|
||||
|
||||
class MemoryBlockManager;
|
||||
class KMemoryBlockManager;
|
||||
|
||||
class PageTable final : NonCopyable {
|
||||
class KPageTable final : NonCopyable {
|
||||
public:
|
||||
explicit PageTable(Core::System& system);
|
||||
explicit KPageTable(Core::System& system);
|
||||
|
||||
ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
|
||||
VAddr code_addr, std::size_t code_size,
|
||||
Memory::MemoryManager::Pool pool);
|
||||
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state,
|
||||
MemoryPermission perm);
|
||||
KMemoryManager::Pool pool);
|
||||
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
|
||||
KMemoryPermission perm);
|
||||
ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
|
||||
@@ -38,20 +38,20 @@ public:
|
||||
ResultCode UnmapMemory(VAddr addr, std::size_t size);
|
||||
ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
|
||||
MemoryPermission perm);
|
||||
ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm);
|
||||
MemoryInfo QueryInfo(VAddr addr);
|
||||
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm);
|
||||
ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
|
||||
KMemoryPermission perm);
|
||||
ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm);
|
||||
KMemoryInfo QueryInfo(VAddr addr);
|
||||
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
|
||||
ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
|
||||
ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
|
||||
MemoryAttribute value);
|
||||
ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask,
|
||||
KMemoryAttribute value);
|
||||
ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
|
||||
ResultVal<VAddr> SetHeapSize(std::size_t size);
|
||||
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
|
||||
bool is_map_only, VAddr region_start,
|
||||
std::size_t region_num_pages, MemoryState state,
|
||||
MemoryPermission perm, PAddr map_addr = 0);
|
||||
std::size_t region_num_pages, KMemoryState state,
|
||||
KMemoryPermission perm, PAddr map_addr = 0);
|
||||
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
|
||||
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
|
||||
|
||||
@@ -72,47 +72,49 @@ private:
|
||||
ChangePermissionsAndRefresh,
|
||||
};
|
||||
|
||||
static constexpr MemoryAttribute DefaultMemoryIgnoreAttr =
|
||||
MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared;
|
||||
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
|
||||
KMemoryAttribute::IpcLocked |
|
||||
KMemoryAttribute::DeviceShared;
|
||||
|
||||
ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
|
||||
ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm);
|
||||
void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end);
|
||||
ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
|
||||
KMemoryPermission perm);
|
||||
void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end);
|
||||
bool IsRegionMapped(VAddr address, u64 size);
|
||||
bool IsRegionContiguous(VAddr addr, u64 size) const;
|
||||
void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list);
|
||||
MemoryInfo QueryInfoImpl(VAddr addr);
|
||||
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
|
||||
KMemoryInfo QueryInfoImpl(VAddr addr);
|
||||
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
|
||||
std::size_t align);
|
||||
ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
|
||||
ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
|
||||
OperationType operation);
|
||||
ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
|
||||
ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
|
||||
OperationType operation, PAddr map_addr = 0);
|
||||
constexpr VAddr GetRegionAddress(MemoryState state) const;
|
||||
constexpr std::size_t GetRegionSize(MemoryState state) const;
|
||||
constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const;
|
||||
constexpr VAddr GetRegionAddress(KMemoryState state) const;
|
||||
constexpr std::size_t GetRegionSize(KMemoryState state) const;
|
||||
constexpr bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
|
||||
|
||||
constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
|
||||
MemoryState state, MemoryPermission perm_mask,
|
||||
MemoryPermission perm, MemoryAttribute attr_mask,
|
||||
MemoryAttribute attr) const;
|
||||
ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
|
||||
MemoryAttribute* out_attr, VAddr addr, std::size_t size,
|
||||
MemoryState state_mask, MemoryState state,
|
||||
MemoryPermission perm_mask, MemoryPermission perm,
|
||||
MemoryAttribute attr_mask, MemoryAttribute attr,
|
||||
MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
|
||||
ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask,
|
||||
MemoryState state, MemoryPermission perm_mask,
|
||||
MemoryPermission perm, MemoryAttribute attr_mask,
|
||||
MemoryAttribute attr,
|
||||
MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
|
||||
constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr) const;
|
||||
ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
|
||||
KMemoryAttribute* out_attr, VAddr addr, std::size_t size,
|
||||
KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
|
||||
ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr,
|
||||
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
|
||||
return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
|
||||
perm, attr_mask, attr, ignore_attr);
|
||||
}
|
||||
|
||||
std::recursive_mutex page_table_lock;
|
||||
std::unique_ptr<MemoryBlockManager> block_manager;
|
||||
std::unique_ptr<KMemoryBlockManager> block_manager;
|
||||
|
||||
public:
|
||||
constexpr VAddr GetAddressSpaceStart() const {
|
||||
@@ -212,7 +214,7 @@ public:
|
||||
return !IsOutsideASLRRegion(address, size);
|
||||
}
|
||||
constexpr PAddr GetPhysicalAddr(VAddr addr) {
|
||||
return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr;
|
||||
return page_table_impl.backing_addr[addr >> PageBits] + addr;
|
||||
}
|
||||
|
||||
private:
|
||||
@@ -267,11 +269,11 @@ private:
|
||||
bool is_kernel{};
|
||||
bool is_aslr_enabled{};
|
||||
|
||||
MemoryManager::Pool memory_pool{MemoryManager::Pool::Application};
|
||||
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
|
||||
|
||||
Common::PageTable page_table_impl;
|
||||
|
||||
Core::System& system;
|
||||
};
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
} // namespace Kernel
|
||||
@@ -6,7 +6,6 @@
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/k_readable_event.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
@@ -47,7 +46,7 @@ ResultCode KReadableEvent::Reset() {
|
||||
KScopedSchedulerLock lk{kernel};
|
||||
|
||||
if (!is_signaled) {
|
||||
return Svc::ResultInvalidState;
|
||||
return ResultInvalidState;
|
||||
}
|
||||
|
||||
is_signaled = false;
|
||||
|
||||
@@ -75,7 +75,7 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
|
||||
ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
|
||||
const auto index = static_cast<std::size_t>(which);
|
||||
KScopedLightLock lk(lock);
|
||||
R_UNLESS(current_values[index] <= value, Svc::ResultInvalidState);
|
||||
R_UNLESS(current_values[index] <= value, ResultInvalidState);
|
||||
|
||||
limit_values[index] = value;
|
||||
|
||||
|
||||
@@ -734,7 +734,7 @@ void KScheduler::ScheduleImpl() {
|
||||
}
|
||||
guard.unlock();
|
||||
|
||||
Common::Fiber::YieldTo(*old_context, switch_fiber);
|
||||
Common::Fiber::YieldTo(*old_context, *switch_fiber);
|
||||
/// When a thread wakes up, the scheduler may have changed to other in another core.
|
||||
auto& next_scheduler = *system.Kernel().CurrentScheduler();
|
||||
next_scheduler.SwitchContextStep2();
|
||||
@@ -769,13 +769,8 @@ void KScheduler::SwitchToCurrent() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
std::shared_ptr<Common::Fiber>* next_context;
|
||||
if (next_thread != nullptr) {
|
||||
next_context = &next_thread->GetHostContext();
|
||||
} else {
|
||||
next_context = &idle_thread->GetHostContext();
|
||||
}
|
||||
Common::Fiber::YieldTo(switch_fiber, *next_context);
|
||||
auto thread = next_thread ? next_thread : idle_thread;
|
||||
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
|
||||
} while (!is_switch_pending());
|
||||
}
|
||||
}
|
||||
@@ -800,9 +795,9 @@ void KScheduler::Initialize() {
|
||||
std::string name = "Idle Thread Id:" + std::to_string(core_id);
|
||||
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
|
||||
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
|
||||
auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
|
||||
KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
|
||||
nullptr, std::move(init_func), init_func_parameter);
|
||||
auto thread_res = KThread::CreateThread(
|
||||
system, ThreadType::Main, name, 0, KThread::IdleThreadPriority, 0,
|
||||
static_cast<u32>(core_id), 0, nullptr, std::move(init_func), init_func_parameter);
|
||||
idle_thread = thread_res.Unwrap().get();
|
||||
}
|
||||
|
||||
|
||||
src/core/hle/kernel/k_scoped_resource_reservation.h (new file, 67 lines)
@@ -0,0 +1,67 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/process.h"

namespace Kernel {

class KScopedResourceReservation {
public:
    explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
                                        s64 v, s64 timeout)
        : resource_limit(std::move(l)), value(v), resource(r) {
        if (resource_limit && value) {
            success = resource_limit->Reserve(resource, value, timeout);
        } else {
            success = true;
        }
    }

    explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r,
                                        s64 v = 1)
        : resource_limit(std::move(l)), value(v), resource(r) {
        if (resource_limit && value) {
            success = resource_limit->Reserve(resource, value);
        } else {
            success = true;
        }
    }

    explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v, s64 t)
        : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {}

    explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v = 1)
        : KScopedResourceReservation(p->GetResourceLimit(), r, v) {}

    ~KScopedResourceReservation() noexcept {
        if (resource_limit && value && success) {
            // resource was not committed, release the reservation.
            resource_limit->Release(resource, value);
        }
    }

    /// Commit the resource reservation, destruction of this object does not release the resource
    void Commit() {
        resource_limit = nullptr;
    }

    [[nodiscard]] bool Succeeded() const {
        return success;
    }

private:
    std::shared_ptr<KResourceLimit> resource_limit;
    s64 value;
    LimitableResource resource;
    bool success;
};

} // namespace Kernel
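KScopedResourceReservation follows the usual reserve-then-commit RAII shape: the constructor attempts the reservation, the destructor releases it unless Commit() ran, so any early return on an error path hands the resource back automatically. KSharedMemory::Create in the next file uses it exactly that way. A hedged standalone sketch of the same pattern, reduced to one resource type (all names here are stand-ins, not code from this change):

```cpp
#include <cstdio>

// Minimal stand-in for a resource limit; only what the sketch needs.
struct FakeLimit {
    long long free = 4;
    bool Reserve(long long v) {
        if (free < v) {
            return false;
        }
        free -= v;
        return true;
    }
    void Release(long long v) {
        free += v;
    }
};

// Same reserve/commit shape as KScopedResourceReservation.
class ScopedReservation {
public:
    ScopedReservation(FakeLimit& l, long long v) : limit(&l), value(v), success(l.Reserve(v)) {}
    ~ScopedReservation() {
        if (limit && success) {
            limit->Release(value); // never committed: hand the reservation back
        }
    }
    void Commit() {
        limit = nullptr; // destructor becomes a no-op
    }
    bool Succeeded() const {
        return success;
    }

private:
    FakeLimit* limit;
    long long value;
    bool success;
};

int main() {
    FakeLimit limit;
    {
        ScopedReservation r(limit, 3);
        if (!r.Succeeded()) {
            return 1;
        }
        // ... create the object that owns the reserved resource ...
        r.Commit(); // ownership transferred, destructor will not release
    }
    std::printf("free after commit: %lld\n", limit.free); // 1
    {
        ScopedReservation r(limit, 1);
        // no Commit(): this models an early-exit/error path
    }
    std::printf("free after rollback: %lld\n", limit.free); // 1
    return 0;
}
```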
|
||||
src/core/hle/kernel/k_shared_memory.cpp (new file, 65 lines)
@@ -0,0 +1,65 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "core/core.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"

namespace Kernel {

KSharedMemory::KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
    : Object{kernel}, device_memory{device_memory} {}

KSharedMemory::~KSharedMemory() {
    kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
}

std::shared_ptr<KSharedMemory> KSharedMemory::Create(
    KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
    KPageLinkedList&& page_list, KMemoryPermission owner_permission,
    KMemoryPermission user_permission, PAddr physical_address, std::size_t size, std::string name) {

    const auto resource_limit = kernel.GetSystemResourceLimit();
    KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                  size);
    ASSERT(memory_reservation.Succeeded());

    std::shared_ptr<KSharedMemory> shared_memory{
        std::make_shared<KSharedMemory>(kernel, device_memory)};

    shared_memory->owner_process = owner_process;
    shared_memory->page_list = std::move(page_list);
    shared_memory->owner_permission = owner_permission;
    shared_memory->user_permission = user_permission;
    shared_memory->physical_address = physical_address;
    shared_memory->size = size;
    shared_memory->name = name;

    memory_reservation.Commit();
    return shared_memory;
}

ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
                              KMemoryPermission permissions) {
    const u64 page_count{(size + PageSize - 1) / PageSize};

    if (page_list.GetNumPages() != page_count) {
        UNIMPLEMENTED_MSG("Page count does not match");
    }

    const KMemoryPermission expected =
        &target_process == owner_process ? owner_permission : user_permission;

    if (permissions != expected) {
        UNIMPLEMENTED_MSG("Permission does not match");
    }

    return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared,
                                               permissions);
}

} // namespace Kernel
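Map above rounds the mapping size up to whole pages with `(size + PageSize - 1) / PageSize`; the same ceiling division appears throughout the page-table code. A one-line check of that identity, with PageSize assumed to be 0x1000:

```cpp
#include <cassert>
#include <cstddef>

constexpr std::size_t PageSize = 0x1000;

// Ceiling division: number of pages needed to cover `size` bytes.
constexpr std::size_t PageCount(std::size_t size) {
    return (size + PageSize - 1) / PageSize;
}

int main() {
    static_assert(PageCount(0) == 0);
    static_assert(PageCount(1) == 1);
    static_assert(PageCount(PageSize) == 1);
    static_assert(PageCount(PageSize + 1) == 2);
    assert(PageCount(3 * PageSize - 1) == 3);
    return 0;
}
```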
|
||||
@@ -9,8 +9,8 @@
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/device_memory.h"
|
||||
#include "core/hle/kernel/memory/memory_block.h"
|
||||
#include "core/hle/kernel/memory/page_linked_list.h"
|
||||
#include "core/hle/kernel/k_memory_block.h"
|
||||
#include "core/hle/kernel/k_page_linked_list.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/result.h"
|
||||
@@ -19,15 +19,15 @@ namespace Kernel {
|
||||
|
||||
class KernelCore;
|
||||
|
||||
class SharedMemory final : public Object {
|
||||
class KSharedMemory final : public Object {
|
||||
public:
|
||||
explicit SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory);
|
||||
~SharedMemory() override;
|
||||
explicit KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory);
|
||||
~KSharedMemory() override;
|
||||
|
||||
static std::shared_ptr<SharedMemory> Create(
|
||||
static std::shared_ptr<KSharedMemory> Create(
|
||||
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
|
||||
Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
|
||||
Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
|
||||
KPageLinkedList&& page_list, KMemoryPermission owner_permission,
|
||||
KMemoryPermission user_permission, PAddr physical_address, std::size_t size,
|
||||
std::string name);
|
||||
|
||||
std::string GetTypeName() const override {
|
||||
@@ -51,7 +51,7 @@ public:
|
||||
* @param permissions Memory block map permissions (specified by SVC field)
|
||||
*/
|
||||
ResultCode Map(Process& target_process, VAddr address, std::size_t size,
|
||||
Memory::MemoryPermission permissions);
|
||||
KMemoryPermission permissions);
|
||||
|
||||
/**
|
||||
* Gets a pointer to the shared memory block
|
||||
@@ -76,9 +76,9 @@ public:
|
||||
private:
|
||||
Core::DeviceMemory& device_memory;
|
||||
Process* owner_process{};
|
||||
Memory::PageLinkedList page_list;
|
||||
Memory::MemoryPermission owner_permission{};
|
||||
Memory::MemoryPermission user_permission{};
|
||||
KPageLinkedList page_list;
|
||||
KMemoryPermission owner_permission{};
|
||||
KMemoryPermission user_permission{};
|
||||
PAddr physical_address{};
|
||||
std::size_t size{};
|
||||
std::string name;
|
||||
@@ -2,9 +2,6 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
@@ -12,17 +9,17 @@
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
namespace Kernel {
|
||||
|
||||
namespace impl {
|
||||
|
||||
class SlabHeapImpl final : NonCopyable {
|
||||
class KSlabHeapImpl final : NonCopyable {
|
||||
public:
|
||||
struct Node {
|
||||
Node* next{};
|
||||
};
|
||||
|
||||
constexpr SlabHeapImpl() = default;
|
||||
constexpr KSlabHeapImpl() = default;
|
||||
|
||||
void Initialize(std::size_t size) {
|
||||
ASSERT(head == nullptr);
|
||||
@@ -65,9 +62,9 @@ private:
|
||||
|
||||
} // namespace impl
|
||||
|
||||
class SlabHeapBase : NonCopyable {
|
||||
class KSlabHeapBase : NonCopyable {
|
||||
public:
|
||||
constexpr SlabHeapBase() = default;
|
||||
constexpr KSlabHeapBase() = default;
|
||||
|
||||
constexpr bool Contains(uintptr_t addr) const {
|
||||
return start <= addr && addr < end;
|
||||
@@ -126,7 +123,7 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
using Impl = impl::SlabHeapImpl;
|
||||
using Impl = impl::KSlabHeapImpl;
|
||||
|
||||
Impl impl;
|
||||
uintptr_t peak{};
|
||||
@@ -135,9 +132,9 @@ private:
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class SlabHeap final : public SlabHeapBase {
|
||||
class KSlabHeap final : public KSlabHeapBase {
|
||||
public:
|
||||
constexpr SlabHeap() : SlabHeapBase() {}
|
||||
constexpr KSlabHeap() : KSlabHeapBase() {}
|
||||
|
||||
void Initialize(void* memory, std::size_t memory_size) {
|
||||
InitializeImpl(sizeof(T), memory, memory_size);
|
||||
@@ -160,4 +157,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
} // namespace Kernel
|
||||
src/core/hle/kernel/k_spin_lock.cpp (new file, 54 lines)
@@ -0,0 +1,54 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/k_spin_lock.h"

#if _MSC_VER
#include <intrin.h>
#if _M_AMD64
#define __x86_64__ 1
#endif
#if _M_ARM64
#define __aarch64__ 1
#endif
#else
#if __x86_64__
#include <xmmintrin.h>
#endif
#endif

namespace {

void ThreadPause() {
#if __x86_64__
    _mm_pause();
#elif __aarch64__ && _MSC_VER
    __yield();
#elif __aarch64__
    asm("yield");
#endif
}

} // namespace

namespace Kernel {

void KSpinLock::Lock() {
    while (lck.test_and_set(std::memory_order_acquire)) {
        ThreadPause();
    }
}

void KSpinLock::Unlock() {
    lck.clear(std::memory_order_release);
}

bool KSpinLock::TryLock() {
    if (lck.test_and_set(std::memory_order_acquire)) {
        return false;
    }
    return true;
}

} // namespace Kernel
|
||||
src/core/hle/kernel/k_spin_lock.h (new file, 33 lines)
@@ -0,0 +1,33 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>

#include "core/hle/kernel/k_scoped_lock.h"

namespace Kernel {

class KSpinLock {
public:
    KSpinLock() = default;

    KSpinLock(const KSpinLock&) = delete;
    KSpinLock& operator=(const KSpinLock&) = delete;

    KSpinLock(KSpinLock&&) = delete;
    KSpinLock& operator=(KSpinLock&&) = delete;

    void Lock();
    void Unlock();
    [[nodiscard]] bool TryLock();

private:
    std::atomic_flag lck = ATOMIC_FLAG_INIT;
};

using KScopedSpinLock = KScopedLock<KSpinLock>;

} // namespace Kernel
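KSpinLock is a plain test-and-set lock over std::atomic_flag, and KScopedSpinLock is just KScopedLock<KSpinLock>, so callers take it RAII-style. A standalone sketch of the same acquire/release pattern; the scoped wrapper here is a local stand-in for illustration, not the kernel's KScopedLock:

```cpp
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Same shape as KSpinLock: spin on test_and_set(acquire), clear(release) to unlock.
class SpinLock {
public:
    void Lock() {
        while (flag.test_and_set(std::memory_order_acquire)) {
            // A real implementation pauses/yields here (see ThreadPause in k_spin_lock.cpp above).
        }
    }
    void Unlock() {
        flag.clear(std::memory_order_release);
    }

private:
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
};

// Local stand-in for KScopedLock<T>: lock on construction, unlock on destruction.
template <typename T>
class ScopedLock {
public:
    explicit ScopedLock(T& l) : lock(l) {
        lock.Lock();
    }
    ~ScopedLock() {
        lock.Unlock();
    }

private:
    T& lock;
};

int main() {
    SpinLock lock;
    long long counter = 0;
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; ++t) {
        threads.emplace_back([&] {
            for (int i = 0; i < 100000; ++i) {
                ScopedLock<SpinLock> lk(lock);
                ++counter;
            }
        });
    }
    for (auto& th : threads) {
        th.join();
    }
    std::printf("%lld\n", counter); // 400000
    return 0;
}
```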
|
||||
@@ -40,20 +40,20 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
|
||||
// Check if the timeout is zero.
|
||||
if (timeout == 0) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTimedOut;
|
||||
return ResultTimedOut;
|
||||
}
|
||||
|
||||
// Check if the thread should terminate.
|
||||
if (thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTerminationRequested;
|
||||
return ResultTerminationRequested;
|
||||
}
|
||||
|
||||
// Check if waiting was canceled.
|
||||
if (thread->IsWaitCancelled()) {
|
||||
slp.CancelSleep();
|
||||
thread->ClearWaitCancelled();
|
||||
return Svc::ResultCancelled;
|
||||
return ResultCancelled;
|
||||
}
|
||||
|
||||
// Add the waiters.
|
||||
@@ -75,7 +75,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
|
||||
|
||||
// Mark the thread as waiting.
|
||||
thread->SetCancellable();
|
||||
thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
|
||||
thread->SetSyncedObject(nullptr, ResultTimedOut);
|
||||
thread->SetState(ThreadState::Waiting);
|
||||
thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
|
||||
}
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
// Copyright 2020 yuzu Emulator Project
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <random>
|
||||
|
||||
#include "core/hle/kernel/memory/system_control.h"
|
||||
#include "core/hle/kernel/k_system_control.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace Kernel::Memory::SystemControl {
|
||||
namespace {
|
||||
template <typename F>
|
||||
u64 GenerateUniformRange(u64 min, u64 max, F f) {
|
||||
@@ -25,16 +26,17 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
|
||||
}
|
||||
}
|
||||
|
||||
u64 GenerateRandomU64ForInit() {
|
||||
} // Anonymous namespace
|
||||
|
||||
u64 KSystemControl::GenerateRandomU64() {
|
||||
static std::random_device device;
|
||||
static std::mt19937 gen(device());
|
||||
static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
|
||||
return distribution(gen);
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
u64 GenerateRandomRange(u64 min, u64 max) {
|
||||
return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
|
||||
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
|
||||
return GenerateUniformRange(min, max, GenerateRandomU64);
|
||||
}
|
||||
|
||||
} // namespace Kernel::Memory::SystemControl
|
||||
} // namespace Kernel
|
||||
src/core/hle/kernel/k_system_control.h (new file, 19 lines)
@@ -0,0 +1,19 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Kernel {

class KSystemControl {
public:
    KSystemControl() = default;

    static u64 GenerateRandomRange(u64 min, u64 max);
    static u64 GenerateRandomU64();
};

} // namespace Kernel
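As the .cpp diff above shows, KSystemControl::GenerateRandomU64 is backed by std::mt19937 seeded from std::random_device, and GenerateRandomRange maps that into [min, max] via a range helper whose body is partly elided by the hunk. A hedged sketch of the standard-library way to get the same inclusive-range behaviour; this is an illustration, not the helper's actual implementation:

```cpp
#include <cstdint>
#include <cstdio>
#include <random>

// Uniformly distributed u64 in [min, max], inclusive.
std::uint64_t RandomRange(std::uint64_t min, std::uint64_t max) {
    static std::random_device device;
    static std::mt19937_64 gen(device());
    std::uniform_int_distribution<std::uint64_t> distribution(min, max);
    return distribution(gen);
}

int main() {
    // E.g. picking a randomized slide in some address window.
    for (int i = 0; i < 3; ++i) {
        std::printf("0x%llx\n", static_cast<unsigned long long>(RandomRange(0x1000, 0x10000)));
    }
    return 0;
}
```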
|
||||
@@ -18,16 +18,15 @@
|
||||
#include "core/core.h"
|
||||
#include "core/cpu_manager.h"
|
||||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_condition_variable.h"
|
||||
#include "core/hle/kernel/k_memory_layout.h"
|
||||
#include "core/hle/kernel/k_resource_limit.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
#include "core/hle/kernel/k_thread_queue.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/memory/memory_layout.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
@@ -127,7 +126,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
|
||||
|
||||
// Set core ID and wait result.
|
||||
core_id = phys_core;
|
||||
wait_result = Svc::ResultNoSynchronizationObject;
|
||||
wait_result = ResultNoSynchronizationObject;
|
||||
|
||||
// Set priorities.
|
||||
priority = prio;
|
||||
@@ -238,7 +237,7 @@ void KThread::Finalize() {
|
||||
while (it != waiter_list.end()) {
|
||||
// The thread shouldn't be a kernel waiter.
|
||||
it->SetLockOwner(nullptr);
|
||||
it->SetSyncedObject(nullptr, Svc::ResultInvalidState);
|
||||
it->SetSyncedObject(nullptr, ResultInvalidState);
|
||||
it->Wakeup();
|
||||
it = waiter_list.erase(it);
|
||||
}
|
||||
@@ -447,7 +446,7 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
|
||||
// If the core id is no-update magic, preserve the ideal core id.
|
||||
if (core_id == Svc::IdealCoreNoUpdate) {
|
||||
core_id = virtual_ideal_core_id;
|
||||
R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, Svc::ResultInvalidCombination);
|
||||
R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
|
||||
}
|
||||
|
||||
// Set the virtual core/affinity mask.
|
||||
@@ -526,7 +525,7 @@ ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
|
||||
if (GetStackParameters().is_pinned) {
|
||||
// Verify that the current thread isn't terminating.
|
||||
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
|
||||
Svc::ResultTerminationRequested);
|
||||
ResultTerminationRequested);
|
||||
|
||||
// Note that the thread was pinned.
|
||||
thread_is_pinned = true;
|
||||
@@ -604,7 +603,7 @@ void KThread::WaitCancel() {
|
||||
sleeping_queue->WakeupThread(this);
|
||||
wait_cancelled = true;
|
||||
} else {
|
||||
SetSyncedObject(nullptr, Svc::ResultCancelled);
|
||||
SetSyncedObject(nullptr, ResultCancelled);
|
||||
SetState(ThreadState::Runnable);
|
||||
wait_cancelled = false;
|
||||
}
|
||||
@@ -663,12 +662,12 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
|
||||
// Verify our state.
|
||||
const auto cur_state = GetState();
|
||||
R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
|
||||
Svc::ResultInvalidState);
|
||||
ResultInvalidState);
|
||||
|
||||
// Either pause or resume.
|
||||
if (activity == Svc::ThreadActivity::Paused) {
|
||||
// Verify that we're not suspended.
|
||||
R_UNLESS(!IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
|
||||
R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
|
||||
|
||||
// Suspend.
|
||||
RequestSuspend(SuspendType::Thread);
|
||||
@@ -676,7 +675,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
|
||||
ASSERT(activity == Svc::ThreadActivity::Runnable);
|
||||
|
||||
// Verify that we're suspended.
|
||||
R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
|
||||
R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
|
||||
|
||||
// Resume.
|
||||
Resume(SuspendType::Thread);
|
||||
@@ -698,7 +697,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
|
||||
if (GetStackParameters().is_pinned) {
|
||||
// Verify that the current thread isn't terminating.
|
||||
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
|
||||
Svc::ResultTerminationRequested);
|
||||
ResultTerminationRequested);
|
||||
|
||||
// Note that the thread was pinned and not current.
|
||||
thread_is_pinned = true;
|
||||
@@ -745,7 +744,7 @@ ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
|
||||
KScopedSchedulerLock sl{kernel};
|
||||
|
||||
// Verify that we're suspended.
|
||||
R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
|
||||
R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
|
||||
|
||||
// If we're not terminating, get the thread's user context.
|
||||
if (!IsTerminationRequested()) {
|
||||
@@ -783,7 +782,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
|
||||
}
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
if (IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters++) >= 0);
|
||||
}
|
||||
|
||||
@@ -796,7 +795,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
if (IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters--) > 0);
|
||||
}
|
||||
|
||||
@@ -871,7 +870,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
|
||||
KThread* thread = std::addressof(*it);
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
if (IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters--) > 0);
|
||||
}
|
||||
it = waiter_list.erase(it);
|
||||
@@ -905,12 +904,11 @@ ResultCode KThread::Run() {
|
||||
KScopedSchedulerLock lk{kernel};
|
||||
|
||||
// If either this thread or the current thread are requesting termination, note it.
|
||||
R_UNLESS(!IsTerminationRequested(), Svc::ResultTerminationRequested);
|
||||
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
|
||||
Svc::ResultTerminationRequested);
|
||||
R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested);
|
||||
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
|
||||
|
||||
// Ensure our thread state is correct.
|
||||
R_UNLESS(GetState() == ThreadState::Initialized, Svc::ResultInvalidState);
|
||||
R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState);
|
||||
|
||||
// If the current thread has been asked to suspend, suspend it and retry.
|
||||
if (GetCurrentThread(kernel).IsSuspended()) {
|
||||
@@ -962,7 +960,7 @@ ResultCode KThread::Sleep(s64 timeout) {
|
||||
// Check if the thread should terminate.
|
||||
if (IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTerminationRequested;
|
||||
return ResultTerminationRequested;
|
||||
}
|
||||
|
||||
// Mark the thread as waiting.
|
||||
@@ -997,22 +995,11 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
|
||||
return host_context;
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process) {
|
||||
std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
|
||||
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
|
||||
return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
|
||||
owner_process, std::move(init_func), init_func_parameter);
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
|
||||
std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id,
|
||||
VAddr stack_top, Process* owner_process,
|
||||
std::function<void(void*)>&& thread_start_func,
|
||||
void* thread_start_parameter) {
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(Core::System& system,
|
||||
ThreadType type_flags, std::string name,
|
||||
VAddr entry_point, u32 priority, u64 arg,
|
||||
s32 processor_id, VAddr stack_top,
|
||||
Process* owner_process) {
|
||||
auto& kernel = system.Kernel();
|
||||
|
||||
std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
|
||||
@@ -1029,12 +1016,35 @@ ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, Thread
|
||||
auto& scheduler = kernel.GlobalSchedulerContext();
|
||||
scheduler.AddThread(thread);
|
||||
|
||||
thread->host_context =
|
||||
std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
|
||||
|
||||
return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(
|
||||
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
|
||||
u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
|
||||
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter) {
|
||||
auto thread_result = CreateThread(system, type_flags, name, entry_point, priority, arg,
|
||||
processor_id, stack_top, owner_process);
|
||||
|
||||
if (thread_result.Succeeded()) {
|
||||
(*thread_result)->host_context =
|
||||
std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
|
||||
}
|
||||
|
||||
return thread_result;
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<KThread>> KThread::CreateUserThread(
|
||||
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority,
|
||||
u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process) {
|
||||
std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
|
||||
|
||||
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
|
||||
|
||||
return CreateThread(system, type_flags, name, entry_point, priority, arg, processor_id,
|
||||
stack_top, owner_process, std::move(init_func), init_func_parameter);
|
||||
}
|
||||
|
||||
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
|
||||
return kernel.GetCurrentEmuThread();
|
||||
}
|
||||
|
||||
@@ -116,7 +116,7 @@ public:
|
||||
using WaiterList = boost::intrusive::list<KThread>;
|
||||
|
||||
/**
|
||||
* Creates and returns a new thread. The new thread is immediately scheduled
|
||||
* Creates and returns a new thread.
|
||||
* @param system The instance of the whole system
|
||||
* @param name The friendly name desired for the thread
|
||||
* @param entry_point The address at which the thread should start execution
|
||||
@@ -127,12 +127,12 @@ public:
|
||||
* @param owner_process The parent process for the thread, if null, it's a kernel thread
|
||||
* @return A shared pointer to the newly created thread
|
||||
*/
|
||||
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
|
||||
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
|
||||
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
|
||||
|
||||
/**
|
||||
* Creates and returns a new thread. The new thread is immediately scheduled
|
||||
* Creates and returns a new thread, with a specified entry point.
|
||||
* @param system The instance of the whole system
|
||||
* @param name The friendly name desired for the thread
|
||||
* @param entry_point The address at which the thread should start execution
|
||||
@@ -145,11 +145,27 @@ public:
|
||||
* @param thread_start_parameter The parameter which will passed to host context on init
|
||||
* @return A shared pointer to the newly created thread
|
||||
*/
|
||||
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
|
||||
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread(
|
||||
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
|
||||
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
|
||||
|
||||
/**
|
||||
* Creates and returns a new thread for the emulated "user" process.
|
||||
* @param system The instance of the whole system
|
||||
* @param name The friendly name desired for the thread
|
||||
* @param entry_point The address at which the thread should start execution
|
||||
* @param priority The thread's priority
|
||||
* @param arg User data to pass to the thread
|
||||
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
|
||||
* @param stack_top The address of the thread's stack top
|
||||
* @param owner_process The parent process for the thread, if null, it's a kernel thread
|
||||
* @return A shared pointer to the newly created thread
|
||||
*/
|
||||
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateUserThread(
|
||||
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
|
||||
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
|
||||
|
||||
[[nodiscard]] std::string GetName() const override {
|
||||
return name;
|
||||
}
|
||||
|
||||
@@ -26,19 +26,19 @@
|
||||
#include "core/device_memory.h"
|
||||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/client_port.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_memory_layout.h"
|
||||
#include "core/hle/kernel/k_memory_manager.h"
|
||||
#include "core/hle/kernel/k_resource_limit.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_shared_memory.h"
|
||||
#include "core/hle/kernel/k_slab_heap.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/memory/memory_layout.h"
|
||||
#include "core/hle/kernel/memory/memory_manager.h"
|
||||
#include "core/hle/kernel/memory/slab_heap.h"
|
||||
#include "core/hle/kernel/physical_core.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/service_thread.h"
|
||||
#include "core/hle/kernel/shared_memory.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
#include "core/hle/kernel/time_manager.h"
|
||||
#include "core/hle/lock.h"
|
||||
#include "core/hle/result.h"
|
||||
@@ -68,9 +68,9 @@ struct KernelCore::Impl {
|
||||
InitializePhysicalCores();
|
||||
InitializeSystemResourceLimit(kernel, system);
|
||||
InitializeMemoryLayout();
|
||||
InitializePreemption(kernel);
|
||||
InitializeSchedulers();
|
||||
InitializeSuspendThreads();
|
||||
InitializePreemption(kernel);
|
||||
}
|
||||
|
||||
void InitializeCores() {
|
||||
@@ -101,8 +101,6 @@ struct KernelCore::Impl {
|
||||
|
||||
current_process = nullptr;
|
||||
|
||||
system_resource_limit = nullptr;
|
||||
|
||||
global_handle_table.Clear();
|
||||
|
||||
preemption_event = nullptr;
|
||||
@@ -111,6 +109,13 @@ struct KernelCore::Impl {
|
||||
|
||||
exclusive_monitor.reset();
|
||||
|
||||
hid_shared_mem = nullptr;
|
||||
font_shared_mem = nullptr;
|
||||
irs_shared_mem = nullptr;
|
||||
time_shared_mem = nullptr;
|
||||
|
||||
system_resource_limit = nullptr;
|
||||
|
||||
// Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
|
||||
next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
|
||||
}
|
||||
@@ -141,11 +146,17 @@ struct KernelCore::Impl {
|
||||
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 700).IsSuccess());
|
||||
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200)
|
||||
.IsSuccess());
|
||||
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 900).IsSuccess());
|
||||
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 933).IsSuccess());
|
||||
|
||||
if (!system_resource_limit->Reserve(LimitableResource::PhysicalMemory, 0x60000)) {
|
||||
// Derived from recent software updates. The kernel reserves 27MB
|
||||
constexpr u64 kernel_size{0x1b00000};
|
||||
if (!system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size)) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
// Reserve secure applet memory, introduced in firmware 5.0.0
|
||||
constexpr u64 secure_applet_memory_size{0x400000};
|
||||
ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
|
||||
secure_applet_memory_size));
|
||||
}
|
||||
|
||||
void InitializePreemption(KernelCore& kernel) {
|
||||
@@ -170,9 +181,9 @@ struct KernelCore::Impl {
std::string name = "Suspend Thread Id:" + std::to_string(i);
std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
auto thread_res = KThread::Create(system, ThreadType::HighPriority, std::move(name), 0,
0, 0, static_cast<u32>(i), 0, nullptr,
std::move(init_func), init_func_parameter);
auto thread_res = KThread::CreateThread(
system, ThreadType::HighPriority, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
nullptr, std::move(init_func), init_func_parameter);

suspend_threads[i] = std::move(thread_res).Unwrap();
}
@@ -210,10 +221,9 @@ struct KernelCore::Impl {
|
||||
// Gets the dummy KThread for the caller, allocating a new one if this is the first time
|
||||
KThread* GetHostDummyThread() {
|
||||
const thread_local auto thread =
|
||||
KThread::Create(
|
||||
KThread::CreateThread(
|
||||
system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0,
|
||||
KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr,
|
||||
[]([[maybe_unused]] void* arg) { UNREACHABLE(); }, nullptr)
|
||||
KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr)
|
||||
.Unwrap();
|
||||
return thread.get();
|
||||
}
|
||||
@@ -260,7 +270,7 @@ struct KernelCore::Impl {
|
||||
|
||||
void InitializeMemoryLayout() {
|
||||
// Initialize memory layout
|
||||
constexpr Memory::MemoryLayout layout{Memory::MemoryLayout::GetDefaultLayout()};
|
||||
constexpr KMemoryLayout layout{KMemoryLayout::GetDefaultLayout()};
|
||||
constexpr std::size_t hid_size{0x40000};
|
||||
constexpr std::size_t font_size{0x1100000};
|
||||
constexpr std::size_t irs_size{0x8000};
|
||||
@@ -271,39 +281,42 @@ struct KernelCore::Impl {
|
||||
constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};
|
||||
|
||||
// Initialize memory manager
|
||||
memory_manager = std::make_unique<Memory::MemoryManager>();
|
||||
memory_manager->InitializeManager(Memory::MemoryManager::Pool::Application,
|
||||
memory_manager = std::make_unique<KMemoryManager>();
|
||||
memory_manager->InitializeManager(KMemoryManager::Pool::Application,
|
||||
layout.Application().StartAddress(),
|
||||
layout.Application().EndAddress());
|
||||
memory_manager->InitializeManager(Memory::MemoryManager::Pool::Applet,
|
||||
memory_manager->InitializeManager(KMemoryManager::Pool::Applet,
|
||||
layout.Applet().StartAddress(),
|
||||
layout.Applet().EndAddress());
|
||||
memory_manager->InitializeManager(Memory::MemoryManager::Pool::System,
|
||||
memory_manager->InitializeManager(KMemoryManager::Pool::System,
|
||||
layout.System().StartAddress(),
|
||||
layout.System().EndAddress());
|
||||
|
||||
hid_shared_mem = Kernel::SharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr,
|
||||
{hid_addr, hid_size / Memory::PageSize}, Memory::MemoryPermission::None,
|
||||
Memory::MemoryPermission::Read, hid_addr, hid_size, "HID:SharedMemory");
|
||||
font_shared_mem = Kernel::SharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr,
|
||||
{font_pa, font_size / Memory::PageSize}, Memory::MemoryPermission::None,
|
||||
Memory::MemoryPermission::Read, font_pa, font_size, "Font:SharedMemory");
|
||||
irs_shared_mem = Kernel::SharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr,
|
||||
{irs_addr, irs_size / Memory::PageSize}, Memory::MemoryPermission::None,
|
||||
Memory::MemoryPermission::Read, irs_addr, irs_size, "IRS:SharedMemory");
|
||||
time_shared_mem = Kernel::SharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr,
|
||||
{time_addr, time_size / Memory::PageSize}, Memory::MemoryPermission::None,
|
||||
Memory::MemoryPermission::Read, time_addr, time_size, "Time:SharedMemory");
|
||||
hid_shared_mem = Kernel::KSharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize},
|
||||
KMemoryPermission::None, KMemoryPermission::Read, hid_addr, hid_size,
|
||||
"HID:SharedMemory");
|
||||
font_shared_mem = Kernel::KSharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr, {font_pa, font_size / PageSize},
|
||||
KMemoryPermission::None, KMemoryPermission::Read, font_pa, font_size,
|
||||
"Font:SharedMemory");
|
||||
irs_shared_mem = Kernel::KSharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr, {irs_addr, irs_size / PageSize},
|
||||
KMemoryPermission::None, KMemoryPermission::Read, irs_addr, irs_size,
|
||||
"IRS:SharedMemory");
|
||||
time_shared_mem = Kernel::KSharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr, {time_addr, time_size / PageSize},
|
||||
KMemoryPermission::None, KMemoryPermission::Read, time_addr, time_size,
|
||||
"Time:SharedMemory");
|
||||
|
||||
// Allocate slab heaps
|
||||
user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>();
|
||||
user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();
|
||||
|
||||
constexpr u64 user_slab_heap_size{0x1ef000};
|
||||
// Reserve slab heaps
|
||||
ASSERT(
|
||||
system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
|
||||
// Initialize slab heaps
|
||||
constexpr u64 user_slab_heap_size{0x3de000};
|
||||
user_slab_heap_pages->Initialize(
|
||||
system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
|
||||
user_slab_heap_size);
|
||||
@@ -339,14 +352,14 @@ struct KernelCore::Impl {
|
||||
std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
|
||||
|
||||
// Kernel memory management
|
||||
std::unique_ptr<Memory::MemoryManager> memory_manager;
|
||||
std::unique_ptr<Memory::SlabHeap<Memory::Page>> user_slab_heap_pages;
|
||||
std::unique_ptr<KMemoryManager> memory_manager;
|
||||
std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;
|
||||
|
||||
// Shared memory for services
|
||||
std::shared_ptr<Kernel::SharedMemory> hid_shared_mem;
|
||||
std::shared_ptr<Kernel::SharedMemory> font_shared_mem;
|
||||
std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
|
||||
std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
|
||||
std::shared_ptr<Kernel::KSharedMemory> hid_shared_mem;
|
||||
std::shared_ptr<Kernel::KSharedMemory> font_shared_mem;
|
||||
std::shared_ptr<Kernel::KSharedMemory> irs_shared_mem;
|
||||
std::shared_ptr<Kernel::KSharedMemory> time_shared_mem;
|
||||
|
||||
// Threads used for services
|
||||
std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
|
||||
@@ -564,51 +577,51 @@ KThread* KernelCore::GetCurrentEmuThread() const {
|
||||
return impl->GetCurrentEmuThread();
|
||||
}
|
||||
|
||||
Memory::MemoryManager& KernelCore::MemoryManager() {
|
||||
KMemoryManager& KernelCore::MemoryManager() {
|
||||
return *impl->memory_manager;
|
||||
}
|
||||
|
||||
const Memory::MemoryManager& KernelCore::MemoryManager() const {
|
||||
const KMemoryManager& KernelCore::MemoryManager() const {
|
||||
return *impl->memory_manager;
|
||||
}
|
||||
|
||||
Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() {
|
||||
KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() {
|
||||
return *impl->user_slab_heap_pages;
|
||||
}
|
||||
|
||||
const Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() const {
|
||||
const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const {
|
||||
return *impl->user_slab_heap_pages;
|
||||
}
|
||||
|
||||
Kernel::SharedMemory& KernelCore::GetHidSharedMem() {
|
||||
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
|
||||
return *impl->hid_shared_mem;
|
||||
}
|
||||
|
||||
const Kernel::SharedMemory& KernelCore::GetHidSharedMem() const {
|
||||
const Kernel::KSharedMemory& KernelCore::GetHidSharedMem() const {
|
||||
return *impl->hid_shared_mem;
|
||||
}
|
||||
|
||||
Kernel::SharedMemory& KernelCore::GetFontSharedMem() {
|
||||
Kernel::KSharedMemory& KernelCore::GetFontSharedMem() {
|
||||
return *impl->font_shared_mem;
|
||||
}
|
||||
|
||||
const Kernel::SharedMemory& KernelCore::GetFontSharedMem() const {
|
||||
const Kernel::KSharedMemory& KernelCore::GetFontSharedMem() const {
|
||||
return *impl->font_shared_mem;
|
||||
}
|
||||
|
||||
Kernel::SharedMemory& KernelCore::GetIrsSharedMem() {
|
||||
Kernel::KSharedMemory& KernelCore::GetIrsSharedMem() {
|
||||
return *impl->irs_shared_mem;
|
||||
}
|
||||
|
||||
const Kernel::SharedMemory& KernelCore::GetIrsSharedMem() const {
|
||||
const Kernel::KSharedMemory& KernelCore::GetIrsSharedMem() const {
|
||||
return *impl->irs_shared_mem;
|
||||
}
|
||||
|
||||
Kernel::SharedMemory& KernelCore::GetTimeSharedMem() {
|
||||
Kernel::KSharedMemory& KernelCore::GetTimeSharedMem() {
|
||||
return *impl->time_shared_mem;
|
||||
}
|
||||
|
||||
const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
|
||||
const Kernel::KSharedMemory& KernelCore::GetTimeSharedMem() const {
|
||||
return *impl->time_shared_mem;
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
#include <vector>
|
||||
#include "core/arm/cpu_interrupt_handler.h"
|
||||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/memory/memory_types.h"
|
||||
#include "core/hle/kernel/memory_types.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
|
||||
namespace Core {
|
||||
@@ -27,25 +27,23 @@ struct EventType;
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace Memory {
|
||||
class MemoryManager;
|
||||
template <typename T>
|
||||
class SlabHeap;
|
||||
} // namespace Memory
|
||||
|
||||
class ClientPort;
|
||||
class GlobalSchedulerContext;
|
||||
class HandleTable;
|
||||
class PhysicalCore;
|
||||
class Process;
|
||||
class KMemoryManager;
|
||||
class KResourceLimit;
|
||||
class KScheduler;
|
||||
class SharedMemory;
|
||||
class KSharedMemory;
|
||||
class KThread;
|
||||
class PhysicalCore;
|
||||
class Process;
|
||||
class ServiceThread;
|
||||
class Synchronization;
|
||||
class KThread;
|
||||
class TimeManager;
|
||||
|
||||
template <typename T>
|
||||
class KSlabHeap;
|
||||
|
||||
using EmuThreadHandle = uintptr_t;
|
||||
constexpr EmuThreadHandle EmuThreadHandleInvalid{};
|
||||
constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
|
||||
@@ -178,40 +176,40 @@ public:
|
||||
void RegisterHostThread();
|
||||
|
||||
/// Gets the virtual memory manager for the kernel.
|
||||
Memory::MemoryManager& MemoryManager();
|
||||
KMemoryManager& MemoryManager();
|
||||
|
||||
/// Gets the virtual memory manager for the kernel.
|
||||
const Memory::MemoryManager& MemoryManager() const;
|
||||
const KMemoryManager& MemoryManager() const;
|
||||
|
||||
/// Gets the slab heap allocated for user space pages.
|
||||
Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages();
|
||||
KSlabHeap<Page>& GetUserSlabHeapPages();
|
||||
|
||||
/// Gets the slab heap allocated for user space pages.
|
||||
const Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages() const;
|
||||
const KSlabHeap<Page>& GetUserSlabHeapPages() const;
|
||||
|
||||
/// Gets the shared memory object for HID services.
|
||||
Kernel::SharedMemory& GetHidSharedMem();
|
||||
Kernel::KSharedMemory& GetHidSharedMem();
|
||||
|
||||
/// Gets the shared memory object for HID services.
|
||||
const Kernel::SharedMemory& GetHidSharedMem() const;
|
||||
const Kernel::KSharedMemory& GetHidSharedMem() const;
|
||||
|
||||
/// Gets the shared memory object for font services.
|
||||
Kernel::SharedMemory& GetFontSharedMem();
|
||||
Kernel::KSharedMemory& GetFontSharedMem();
|
||||
|
||||
/// Gets the shared memory object for font services.
|
||||
const Kernel::SharedMemory& GetFontSharedMem() const;
|
||||
const Kernel::KSharedMemory& GetFontSharedMem() const;
|
||||
|
||||
/// Gets the shared memory object for IRS services.
|
||||
Kernel::SharedMemory& GetIrsSharedMem();
|
||||
Kernel::KSharedMemory& GetIrsSharedMem();
|
||||
|
||||
/// Gets the shared memory object for IRS services.
|
||||
const Kernel::SharedMemory& GetIrsSharedMem() const;
|
||||
const Kernel::KSharedMemory& GetIrsSharedMem() const;
|
||||
|
||||
/// Gets the shared memory object for Time services.
|
||||
Kernel::SharedMemory& GetTimeSharedMem();
|
||||
Kernel::KSharedMemory& GetTimeSharedMem();
|
||||
|
||||
/// Gets the shared memory object for Time services.
|
||||
const Kernel::SharedMemory& GetTimeSharedMem() const;
|
||||
const Kernel::KSharedMemory& GetTimeSharedMem() const;
|
||||
|
||||
/// Suspend/unsuspend the OS.
|
||||
void Suspend(bool in_suspention);
|
||||
|
||||
@@ -1,370 +0,0 @@
|
||||
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <bit>
|
||||
#include <vector>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/memory/memory_types.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
|
||||
class PageHeap final : NonCopyable {
|
||||
public:
|
||||
static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
|
||||
const auto target_pages{std::max(num_pages, align_pages)};
|
||||
for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
|
||||
if (target_pages <=
|
||||
(static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return static_cast<s32>(i);
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr s32 GetBlockIndex(std::size_t num_pages) {
|
||||
for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
|
||||
if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr std::size_t GetBlockSize(std::size_t index) {
|
||||
return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
|
||||
}
|
||||
|
||||
static constexpr std::size_t GetBlockNumPages(std::size_t index) {
|
||||
return GetBlockSize(index) / PageSize;
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr std::size_t NumMemoryBlockPageShifts{7};
|
||||
static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
|
||||
0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
|
||||
};
|
||||
|
||||
class Block final : NonCopyable {
|
||||
private:
|
||||
class Bitmap final : NonCopyable {
|
||||
public:
|
||||
static constexpr std::size_t MaxDepth{4};
|
||||
|
||||
private:
|
||||
std::array<u64*, MaxDepth> bit_storages{};
|
||||
std::size_t num_bits{};
|
||||
std::size_t used_depths{};
|
||||
|
||||
public:
|
||||
constexpr Bitmap() = default;
|
||||
|
||||
constexpr std::size_t GetNumBits() const {
|
||||
return num_bits;
|
||||
}
|
||||
constexpr s32 GetHighestDepthIndex() const {
|
||||
return static_cast<s32>(used_depths) - 1;
|
||||
}
|
||||
|
||||
constexpr u64* Initialize(u64* storage, std::size_t size) {
|
||||
//* Initially, everything is un-set
|
||||
num_bits = 0;
|
||||
|
||||
// Calculate the needed bitmap depth
|
||||
used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
|
||||
ASSERT(used_depths <= MaxDepth);
|
||||
|
||||
// Set the bitmap pointers
|
||||
for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
|
||||
bit_storages[depth] = storage;
|
||||
size = Common::AlignUp(size, 64) / 64;
|
||||
storage += size;
|
||||
}
|
||||
|
||||
return storage;
|
||||
}
|
||||
|
||||
s64 FindFreeBlock() const {
|
||||
uintptr_t offset{};
|
||||
s32 depth{};
|
||||
|
||||
do {
|
||||
const u64 v{bit_storages[depth][offset]};
|
||||
if (v == 0) {
|
||||
// Non-zero depth indicates that a previous level had a free block
|
||||
ASSERT(depth == 0);
|
||||
return -1;
|
||||
}
|
||||
offset = offset * 64 + static_cast<u32>(std::countr_zero(v));
|
||||
++depth;
|
||||
} while (depth < static_cast<s32>(used_depths));
|
||||
|
||||
return static_cast<s64>(offset);
|
||||
}
|
||||
|
||||
constexpr void SetBit(std::size_t offset) {
|
||||
SetBit(GetHighestDepthIndex(), offset);
|
||||
num_bits++;
|
||||
}
|
||||
|
||||
constexpr void ClearBit(std::size_t offset) {
|
||||
ClearBit(GetHighestDepthIndex(), offset);
|
||||
num_bits--;
|
||||
}
|
||||
|
||||
constexpr bool ClearRange(std::size_t offset, std::size_t count) {
|
||||
const s32 depth{GetHighestDepthIndex()};
|
||||
const auto bit_ind{offset / 64};
|
||||
u64* bits{bit_storages[depth]};
|
||||
if (count < 64) {
|
||||
const auto shift{offset % 64};
|
||||
ASSERT(shift + count <= 64);
|
||||
// Check that all the bits are set
|
||||
const u64 mask{((1ULL << count) - 1) << shift};
|
||||
u64 v{bits[bit_ind]};
|
||||
if ((v & mask) != mask) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Clear the bits
|
||||
v &= ~mask;
|
||||
bits[bit_ind] = v;
|
||||
if (v == 0) {
|
||||
ClearBit(depth - 1, bit_ind);
|
||||
}
|
||||
} else {
|
||||
ASSERT(offset % 64 == 0);
|
||||
ASSERT(count % 64 == 0);
|
||||
// Check that all the bits are set
|
||||
std::size_t remaining{count};
|
||||
std::size_t i = 0;
|
||||
do {
|
||||
if (bits[bit_ind + i++] != ~u64(0)) {
|
||||
return false;
|
||||
}
|
||||
remaining -= 64;
|
||||
} while (remaining > 0);
|
||||
|
||||
// Clear the bits
|
||||
remaining = count;
|
||||
i = 0;
|
||||
do {
|
||||
bits[bit_ind + i] = 0;
|
||||
ClearBit(depth - 1, bit_ind + i);
|
||||
i++;
|
||||
remaining -= 64;
|
||||
} while (remaining > 0);
|
||||
}
|
||||
|
||||
num_bits -= count;
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
constexpr void SetBit(s32 depth, std::size_t offset) {
|
||||
while (depth >= 0) {
|
||||
const auto ind{offset / 64};
|
||||
const auto which{offset % 64};
|
||||
const u64 mask{1ULL << which};
|
||||
|
||||
u64* bit{std::addressof(bit_storages[depth][ind])};
|
||||
const u64 v{*bit};
|
||||
ASSERT((v & mask) == 0);
|
||||
*bit = v | mask;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr void ClearBit(s32 depth, std::size_t offset) {
|
||||
while (depth >= 0) {
|
||||
const auto ind{offset / 64};
|
||||
const auto which{offset % 64};
|
||||
const u64 mask{1ULL << which};
|
||||
|
||||
u64* bit{std::addressof(bit_storages[depth][ind])};
|
||||
u64 v{*bit};
|
||||
ASSERT((v & mask) != 0);
|
||||
v &= ~mask;
|
||||
*bit = v;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr s32 GetRequiredDepth(std::size_t region_size) {
|
||||
s32 depth = 0;
|
||||
while (true) {
|
||||
region_size /= 64;
|
||||
depth++;
|
||||
if (region_size == 0) {
|
||||
return depth;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
|
||||
std::size_t overhead_bits = 0;
|
||||
for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
|
||||
region_size = Common::AlignUp(region_size, 64) / 64;
|
||||
overhead_bits += region_size;
|
||||
}
|
||||
return overhead_bits * sizeof(u64);
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
Bitmap bitmap;
|
||||
VAddr heap_address{};
|
||||
uintptr_t end_offset{};
|
||||
std::size_t block_shift{};
|
||||
std::size_t next_block_shift{};
|
||||
|
||||
public:
|
||||
constexpr Block() = default;
|
||||
|
||||
constexpr std::size_t GetShift() const {
|
||||
return block_shift;
|
||||
}
|
||||
constexpr std::size_t GetNextShift() const {
|
||||
return next_block_shift;
|
||||
}
|
||||
constexpr std::size_t GetSize() const {
|
||||
return static_cast<std::size_t>(1) << GetShift();
|
||||
}
|
||||
constexpr std::size_t GetNumPages() const {
|
||||
return GetSize() / PageSize;
|
||||
}
|
||||
constexpr std::size_t GetNumFreeBlocks() const {
|
||||
return bitmap.GetNumBits();
|
||||
}
|
||||
constexpr std::size_t GetNumFreePages() const {
|
||||
return GetNumFreeBlocks() * GetNumPages();
|
||||
}
|
||||
|
||||
constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
|
||||
u64* bit_storage) {
|
||||
// Set shifts
|
||||
block_shift = bs;
|
||||
next_block_shift = nbs;
|
||||
|
||||
// Align up the address
|
||||
VAddr end{addr + size};
|
||||
const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
|
||||
: (1ULL << block_shift)};
|
||||
addr = Common::AlignDown((addr), align);
|
||||
end = Common::AlignUp((end), align);
|
||||
|
||||
heap_address = addr;
|
||||
end_offset = (end - addr) / (1ULL << block_shift);
|
||||
return bitmap.Initialize(bit_storage, end_offset);
|
||||
}
|
||||
|
||||
constexpr VAddr PushBlock(VAddr address) {
|
||||
// Set the bit for the free block
|
||||
std::size_t offset{(address - heap_address) >> GetShift()};
|
||||
bitmap.SetBit(offset);
|
||||
|
||||
// If we have a next shift, try to clear the blocks below and return the address
|
||||
if (GetNextShift()) {
|
||||
const auto diff{1ULL << (GetNextShift() - GetShift())};
|
||||
offset = Common::AlignDown(offset, diff);
|
||||
if (bitmap.ClearRange(offset, diff)) {
|
||||
return heap_address + (offset << GetShift());
|
||||
}
|
||||
}
|
||||
|
||||
// We couldn't coalesce, or we're already as big as possible
|
||||
return 0;
|
||||
}
|
||||
|
||||
VAddr PopBlock() {
|
||||
// Find a free block
|
||||
const s64 soffset{bitmap.FindFreeBlock()};
|
||||
if (soffset < 0) {
|
||||
return 0;
|
||||
}
|
||||
const auto offset{static_cast<std::size_t>(soffset)};
|
||||
|
||||
// Update our tracking and return it
|
||||
bitmap.ClearBit(offset);
|
||||
return heap_address + (offset << GetShift());
|
||||
}
|
||||
|
||||
public:
|
||||
static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
|
||||
std::size_t cur_block_shift,
|
||||
std::size_t next_block_shift) {
|
||||
const auto cur_block_size{(1ULL << cur_block_shift)};
|
||||
const auto next_block_size{(1ULL << next_block_shift)};
|
||||
const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
|
||||
return Bitmap::CalculateMetadataOverheadSize(
|
||||
(align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
|
||||
}
|
||||
};
|
||||
|
||||
public:
|
||||
PageHeap() = default;
|
||||
|
||||
constexpr VAddr GetAddress() const {
|
||||
return heap_address;
|
||||
}
|
||||
constexpr std::size_t GetSize() const {
|
||||
return heap_size;
|
||||
}
|
||||
constexpr VAddr GetEndAddress() const {
|
||||
return GetAddress() + GetSize();
|
||||
}
|
||||
constexpr std::size_t GetPageOffset(VAddr block) const {
|
||||
return (block - GetAddress()) / PageSize;
|
||||
}
|
||||
|
||||
void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
|
||||
VAddr AllocateBlock(s32 index);
|
||||
void Free(VAddr addr, std::size_t num_pages);
|
||||
|
||||
void UpdateUsedSize() {
|
||||
used_size = heap_size - (GetNumFreePages() * PageSize);
|
||||
}
|
||||
|
||||
static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);
|
||||
|
||||
private:
|
||||
constexpr std::size_t GetNumFreePages() const {
|
||||
std::size_t num_free{};
|
||||
|
||||
for (const auto& block : blocks) {
|
||||
num_free += block.GetNumFreePages();
|
||||
}
|
||||
|
||||
return num_free;
|
||||
}
|
||||
|
||||
void FreeBlock(VAddr block, s32 index);
|
||||
|
||||
VAddr heap_address{};
|
||||
std::size_t heap_size{};
|
||||
std::size_t used_size{};
|
||||
std::array<Block, NumMemoryBlockPageShifts> blocks{};
|
||||
std::vector<u64> metadata;
|
||||
};
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
@@ -1,13 +0,0 @@
|
||||
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel::Memory::SystemControl {
|
||||
|
||||
u64 GenerateRandomRange(u64 min, u64 max);
|
||||
|
||||
} // namespace Kernel::Memory::SystemControl
|
||||
@@ -8,11 +8,11 @@

#include "common/common_types.h"

namespace Kernel::Memory {
namespace Kernel {

constexpr std::size_t PageBits{12};
constexpr std::size_t PageSize{1 << PageBits};

using Page = std::array<u8, PageSize>;

} // namespace Kernel::Memory
} // namespace Kernel
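
With PageBits and PageSize now in the top-level Kernel namespace, byte counts are converted to page counts directly (hid_size / PageSize, code_size / PageSize, and so on in the hunks above and below). A small illustrative sketch, with a hypothetical helper name, assuming the renamed header path from this diff:

#include <cstddef>
#include "common/alignment.h"              // Common::AlignUp, used elsewhere in this diff
#include "core/hle/kernel/memory_types.h"  // Kernel::PageBits / Kernel::PageSize / Kernel::Page

// Hypothetical helper: number of 4 KiB pages needed to back `bytes` of memory.
constexpr std::size_t BytesToPages(std::size_t bytes) {
    return Common::AlignUp(bytes, Kernel::PageSize) / Kernel::PageSize;
}

static_assert(Kernel::PageSize == 0x1000);
static_assert(BytesToPages(0x40000) == 0x40);  // e.g. the HID shared memory region shown earlier
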
@@ -14,14 +14,14 @@
|
||||
#include "core/device_memory.h"
|
||||
#include "core/file_sys/program_metadata.h"
|
||||
#include "core/hle/kernel/code_set.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/k_memory_block_manager.h"
|
||||
#include "core/hle/kernel/k_page_table.h"
|
||||
#include "core/hle/kernel/k_resource_limit.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_resource_reservation.h"
|
||||
#include "core/hle/kernel/k_slab_heap.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/memory/memory_block_manager.h"
|
||||
#include "core/hle/kernel/memory/page_table.h"
|
||||
#include "core/hle/kernel/memory/slab_heap.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
#include "core/hle/lock.h"
|
||||
@@ -39,8 +39,10 @@ namespace {
*/
void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
auto thread_res = KThread::Create(system, ThreadType::User, "main", entry_point, priority, 0,
owner_process.GetIdealCoreId(), stack_top, &owner_process);
ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
auto thread_res =
KThread::CreateUserThread(system, ThreadType::User, "main", entry_point, priority, 0,
owner_process.GetIdealCoreId(), stack_top, &owner_process);

std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();
@@ -117,6 +119,9 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
|
||||
|
||||
std::shared_ptr<Process> process = std::make_shared<Process>(system);
|
||||
process->name = std::move(name);
|
||||
|
||||
// TODO: This is inaccurate
|
||||
// The process should hold a reference to the kernel-wide resource limit.
|
||||
process->resource_limit = std::make_shared<KResourceLimit>(kernel, system);
|
||||
process->status = ProcessStatus::Created;
|
||||
process->program_id = 0;
|
||||
@@ -155,6 +160,9 @@ void Process::DecrementThreadCount() {
|
||||
}
|
||||
|
||||
u64 Process::GetTotalPhysicalMemoryAvailable() const {
|
||||
// TODO: This is expected to always return the application memory pool size after accurately
|
||||
// reserving kernel resources. The current workaround uses a process-local resource limit of
|
||||
// application memory pool size, which is inaccurate.
|
||||
const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
|
||||
page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
|
||||
main_thread_stack_size};
|
||||
@@ -248,8 +256,8 @@ ResultCode Process::Reset() {
|
||||
KScopedSchedulerLock sl{kernel};
|
||||
|
||||
// Validate that we're in a state that we can reset.
|
||||
R_UNLESS(status != ProcessStatus::Exited, Svc::ResultInvalidState);
|
||||
R_UNLESS(is_signaled, Svc::ResultInvalidState);
|
||||
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
|
||||
R_UNLESS(is_signaled, ResultInvalidState);
|
||||
|
||||
// Clear signaled.
|
||||
is_signaled = false;
|
||||
@@ -264,18 +272,29 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
|
||||
system_resource_size = metadata.GetSystemResourceSize();
|
||||
image_size = code_size;
|
||||
|
||||
// Set initial resource limits
|
||||
resource_limit->SetLimitValue(
|
||||
LimitableResource::PhysicalMemory,
|
||||
kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application));
|
||||
KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
|
||||
code_size + system_resource_size);
|
||||
if (!memory_reservation.Succeeded()) {
|
||||
LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
|
||||
code_size + system_resource_size);
|
||||
return ResultResourceLimitedExceeded;
|
||||
}
|
||||
// Initialize proces address space
|
||||
if (const ResultCode result{
|
||||
page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
|
||||
code_size, Memory::MemoryManager::Pool::Application)};
|
||||
code_size, KMemoryManager::Pool::Application)};
|
||||
result.IsError()) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// Map process code region
|
||||
if (const ResultCode result{page_table->MapProcessCode(
|
||||
page_table->GetCodeRegionStart(), code_size / Memory::PageSize,
|
||||
Memory::MemoryState::Code, Memory::MemoryPermission::None)};
|
||||
if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
|
||||
code_size / PageSize, KMemoryState::Code,
|
||||
KMemoryPermission::None)};
|
||||
result.IsError()) {
|
||||
return result;
|
||||
}
|
||||
@@ -308,21 +327,24 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
|
||||
// Set initial resource limits
|
||||
resource_limit->SetLimitValue(
|
||||
LimitableResource::PhysicalMemory,
|
||||
kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
|
||||
kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application));
|
||||
|
||||
resource_limit->SetLimitValue(LimitableResource::Threads, 608);
|
||||
resource_limit->SetLimitValue(LimitableResource::Events, 700);
|
||||
resource_limit->SetLimitValue(LimitableResource::TransferMemory, 128);
|
||||
resource_limit->SetLimitValue(LimitableResource::Sessions, 894);
|
||||
ASSERT(resource_limit->Reserve(LimitableResource::PhysicalMemory, code_size));
|
||||
|
||||
// Create TLS region
|
||||
tls_region_address = CreateTLSRegion();
|
||||
memory_reservation.Commit();
|
||||
|
||||
return handle_table.SetSize(capabilities.GetHandleTableSize());
|
||||
}
|
||||
|
||||
void Process::Run(s32 main_thread_priority, u64 stack_size) {
|
||||
AllocateMainThreadStack(stack_size);
|
||||
resource_limit->Reserve(LimitableResource::Threads, 1);
|
||||
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
|
||||
|
||||
const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
|
||||
ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());
|
||||
@@ -330,8 +352,6 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
|
||||
ChangeStatus(ProcessStatus::Running);
|
||||
|
||||
SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
|
||||
resource_limit->Reserve(LimitableResource::Threads, 1);
|
||||
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
|
||||
}
|
||||
|
||||
void Process::PrepareForTermination() {
|
||||
@@ -358,6 +378,11 @@ void Process::PrepareForTermination() {
|
||||
FreeTLSRegion(tls_region_address);
|
||||
tls_region_address = 0;
|
||||
|
||||
if (resource_limit) {
|
||||
resource_limit->Release(LimitableResource::PhysicalMemory,
|
||||
main_thread_stack_size + image_size);
|
||||
}
|
||||
|
||||
ChangeStatus(ProcessStatus::Exited);
|
||||
}
|
||||
|
||||
@@ -381,22 +406,22 @@ VAddr Process::CreateTLSRegion() {
|
||||
return *tls_page_iter->ReserveSlot();
|
||||
}
|
||||
|
||||
Memory::Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
|
||||
Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
|
||||
ASSERT(tls_page_ptr);
|
||||
|
||||
const VAddr start{page_table->GetKernelMapRegionStart()};
|
||||
const VAddr size{page_table->GetKernelMapRegionEnd() - start};
|
||||
const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
|
||||
const VAddr tls_page_addr{
|
||||
page_table
|
||||
->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize,
|
||||
Memory::MemoryState::ThreadLocal,
|
||||
Memory::MemoryPermission::ReadAndWrite, tls_map_addr)
|
||||
.ValueOr(0)};
|
||||
const VAddr tls_page_addr{page_table
|
||||
->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
|
||||
KMemoryState::ThreadLocal,
|
||||
KMemoryPermission::ReadAndWrite,
|
||||
tls_map_addr)
|
||||
.ValueOr(0)};
|
||||
|
||||
ASSERT(tls_page_addr);
|
||||
|
||||
std::memset(tls_page_ptr, 0, Memory::PageSize);
|
||||
std::memset(tls_page_ptr, 0, PageSize);
|
||||
tls_pages.emplace_back(tls_page_addr);
|
||||
|
||||
const auto reserve_result{tls_pages.back().ReserveSlot()};
|
||||
@@ -423,15 +448,15 @@ void Process::FreeTLSRegion(VAddr tls_address) {
|
||||
void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
|
||||
std::lock_guard lock{HLE::g_hle_lock};
|
||||
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
|
||||
Memory::MemoryPermission permission) {
|
||||
KMemoryPermission permission) {
|
||||
page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
|
||||
};
|
||||
|
||||
system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size());
|
||||
|
||||
ReprotectSegment(code_set.CodeSegment(), Memory::MemoryPermission::ReadAndExecute);
|
||||
ReprotectSegment(code_set.RODataSegment(), Memory::MemoryPermission::Read);
|
||||
ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
|
||||
ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute);
|
||||
ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read);
|
||||
ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite);
|
||||
}
|
||||
|
||||
bool Process::IsSignaled() const {
|
||||
@@ -440,9 +465,9 @@ bool Process::IsSignaled() const {
|
||||
}
|
||||
|
||||
Process::Process(Core::System& system)
|
||||
: KSynchronizationObject{system.Kernel()},
|
||||
page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
|
||||
address_arbiter{system}, condition_var{system}, state_lock{system.Kernel()}, system{system} {}
|
||||
: KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<KPageTable>(system)},
|
||||
handle_table{system.Kernel()}, address_arbiter{system}, condition_var{system},
|
||||
state_lock{system.Kernel()}, system{system} {}
|
||||
|
||||
Process::~Process() = default;
|
||||
|
||||
@@ -460,16 +485,15 @@ ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
|
||||
ASSERT(stack_size);
|
||||
|
||||
// The kernel always ensures that the given stack size is page aligned.
|
||||
main_thread_stack_size = Common::AlignUp(stack_size, Memory::PageSize);
|
||||
main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
|
||||
|
||||
const VAddr start{page_table->GetStackRegionStart()};
|
||||
const std::size_t size{page_table->GetStackRegionEnd() - start};
|
||||
|
||||
CASCADE_RESULT(main_thread_stack_top,
|
||||
page_table->AllocateAndMapMemory(
|
||||
main_thread_stack_size / Memory::PageSize, Memory::PageSize, false, start,
|
||||
size / Memory::PageSize, Memory::MemoryState::Stack,
|
||||
Memory::MemoryPermission::ReadAndWrite));
|
||||
main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
|
||||
KMemoryState::Stack, KMemoryPermission::ReadAndWrite));
|
||||
|
||||
main_thread_stack_top += main_thread_stack_size;
|
||||
|
||||
|
||||
@@ -29,16 +29,13 @@ class ProgramMetadata;
|
||||
namespace Kernel {
|
||||
|
||||
class KernelCore;
|
||||
class KPageTable;
|
||||
class KResourceLimit;
|
||||
class KThread;
|
||||
class TLSPage;
|
||||
|
||||
struct CodeSet;
|
||||
|
||||
namespace Memory {
|
||||
class PageTable;
|
||||
}
|
||||
|
||||
enum class MemoryRegion : u16 {
|
||||
APPLICATION = 1,
|
||||
SYSTEM = 2,
|
||||
@@ -104,12 +101,12 @@ public:
|
||||
}
|
||||
|
||||
/// Gets a reference to the process' page table.
|
||||
Memory::PageTable& PageTable() {
|
||||
KPageTable& PageTable() {
|
||||
return *page_table;
|
||||
}
|
||||
|
||||
/// Gets const a reference to the process' page table.
|
||||
const Memory::PageTable& PageTable() const {
|
||||
const KPageTable& PageTable() const {
|
||||
return *page_table;
|
||||
}
|
||||
|
||||
@@ -385,7 +382,7 @@ private:
|
||||
ResultCode AllocateMainThreadStack(std::size_t stack_size);
|
||||
|
||||
/// Memory manager for this process
|
||||
std::unique_ptr<Memory::PageTable> page_table;
|
||||
std::unique_ptr<KPageTable> page_table;
|
||||
|
||||
/// Current status of the process
|
||||
ProcessStatus status{};
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
|
||||
#include "common/bit_util.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/memory/page_table.h"
|
||||
#include "core/hle/kernel/k_page_table.h"
|
||||
#include "core/hle/kernel/process_capability.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
|
||||
namespace Kernel {
|
||||
namespace {
|
||||
@@ -69,7 +69,7 @@ u32 GetFlagBitOffset(CapabilityType type) {
|
||||
|
||||
ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
|
||||
std::size_t num_capabilities,
|
||||
Memory::PageTable& page_table) {
|
||||
KPageTable& page_table) {
|
||||
Clear();
|
||||
|
||||
// Allow all cores and priorities.
|
||||
@@ -82,7 +82,7 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti
|
||||
|
||||
ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
|
||||
std::size_t num_capabilities,
|
||||
Memory::PageTable& page_table) {
|
||||
KPageTable& page_table) {
|
||||
Clear();
|
||||
|
||||
return ParseCapabilities(capabilities, num_capabilities, page_table);
|
||||
@@ -108,7 +108,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
|
||||
|
||||
ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
|
||||
std::size_t num_capabilities,
|
||||
Memory::PageTable& page_table) {
|
||||
KPageTable& page_table) {
|
||||
u32 set_flags = 0;
|
||||
u32 set_svc_bits = 0;
|
||||
|
||||
@@ -123,13 +123,13 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
|
||||
// If there's only one, then there's a problem.
|
||||
if (i >= num_capabilities) {
|
||||
LOG_ERROR(Kernel, "Invalid combination! i={}", i);
|
||||
return ERR_INVALID_COMBINATION;
|
||||
return ResultInvalidCombination;
|
||||
}
|
||||
|
||||
const auto size_flags = capabilities[i];
|
||||
if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
|
||||
LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
|
||||
return ERR_INVALID_COMBINATION;
|
||||
return ResultInvalidCombination;
|
||||
}
|
||||
|
||||
const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
|
||||
@@ -155,11 +155,11 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
|
||||
}
|
||||
|
||||
ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
|
||||
u32 flag, Memory::PageTable& page_table) {
|
||||
u32 flag, KPageTable& page_table) {
|
||||
const auto type = GetCapabilityType(flag);
|
||||
|
||||
if (type == CapabilityType::Unset) {
|
||||
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
|
||||
return ResultInvalidCapabilityDescriptor;
|
||||
}
|
||||
|
||||
// Bail early on ignorable entries, as one would expect,
|
||||
@@ -176,7 +176,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
|
||||
LOG_ERROR(Kernel,
|
||||
"Attempted to initialize flags that may only be initialized once. set_flags={}",
|
||||
set_flags);
|
||||
return ERR_INVALID_COMBINATION;
|
||||
return ResultInvalidCombination;
|
||||
}
|
||||
set_flags |= set_flag;
|
||||
|
||||
@@ -202,7 +202,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
|
||||
}
|
||||
|
||||
LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
|
||||
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
|
||||
return ResultInvalidCapabilityDescriptor;
|
||||
}
|
||||
|
||||
void ProcessCapabilities::Clear() {
|
||||
@@ -225,7 +225,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
|
||||
if (priority_mask != 0 || core_mask != 0) {
|
||||
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
|
||||
priority_mask, core_mask);
|
||||
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
|
||||
return ResultInvalidCapabilityDescriptor;
|
||||
}
|
||||
|
||||
const u32 core_num_min = (flags >> 16) & 0xFF;
|
||||
@@ -233,7 +233,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
|
||||
if (core_num_min > core_num_max) {
|
||||
LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
|
||||
core_num_min, core_num_max);
|
||||
return ERR_INVALID_COMBINATION;
|
||||
return ResultInvalidCombination;
|
||||
}
|
||||
|
||||
const u32 priority_min = (flags >> 10) & 0x3F;
|
||||
@@ -242,13 +242,13 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
|
||||
LOG_ERROR(Kernel,
|
||||
"Priority min is greater than priority max! priority_min={}, priority_max={}",
|
||||
core_num_min, priority_max);
|
||||
return ERR_INVALID_COMBINATION;
|
||||
return ResultInvalidCombination;
|
||||
}
|
||||
|
||||
// The switch only has 4 usable cores.
|
||||
if (core_num_max >= 4) {
|
||||
LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
|
||||
return ERR_INVALID_PROCESSOR_ID;
|
||||
return ResultInvalidCoreId;
|
||||
}
|
||||
|
||||
const auto make_mask = [](u64 min, u64 max) {
|
||||
@@ -269,7 +269,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
|
||||
|
||||
// If we've already set this svc before, bail.
|
||||
if ((set_svc_bits & svc_bit) != 0) {
|
||||
return ERR_INVALID_COMBINATION;
|
||||
return ResultInvalidCombination;
|
||||
}
|
||||
set_svc_bits |= svc_bit;
|
||||
|
||||
@@ -283,7 +283,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
|
||||
|
||||
if (svc_number >= svc_capabilities.size()) {
|
||||
LOG_ERROR(Kernel, "Process svc capability is out of range! svc_number={}", svc_number);
|
||||
return ERR_OUT_OF_RANGE;
|
||||
return ResultOutOfRange;
|
||||
}
|
||||
|
||||
svc_capabilities[svc_number] = true;
|
||||
@@ -293,12 +293,12 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
|
||||
}
|
||||
|
||||
ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
|
||||
Memory::PageTable& page_table) {
|
||||
KPageTable& page_table) {
|
||||
// TODO(Lioncache): Implement once the memory manager can handle this.
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, Memory::PageTable& page_table) {
|
||||
ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
|
||||
// TODO(Lioncache): Implement once the memory manager can handle this.
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
@@ -321,7 +321,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
|
||||
if (interrupt >= interrupt_capabilities.size()) {
|
||||
LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
|
||||
interrupt);
|
||||
return ERR_OUT_OF_RANGE;
|
||||
return ResultOutOfRange;
|
||||
}
|
||||
|
||||
interrupt_capabilities[interrupt] = true;
|
||||
@@ -334,7 +334,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
|
||||
const u32 reserved = flags >> 17;
|
||||
if (reserved != 0) {
|
||||
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
|
||||
return ERR_RESERVED_VALUE;
|
||||
return ResultReservedValue;
|
||||
}
|
||||
|
||||
program_type = static_cast<ProgramType>((flags >> 14) & 0b111);
|
||||
@@ -354,7 +354,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
|
||||
LOG_ERROR(Kernel,
|
||||
"Kernel version is non zero or flags are too small! major_version={}, flags={}",
|
||||
major_version, flags);
|
||||
return ERR_INVALID_CAPABILITY_DESCRIPTOR;
|
||||
return ResultInvalidCapabilityDescriptor;
|
||||
}
|
||||
|
||||
kernel_version = flags;
|
||||
@@ -365,7 +365,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
|
||||
const u32 reserved = flags >> 26;
|
||||
if (reserved != 0) {
|
||||
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
|
||||
return ERR_RESERVED_VALUE;
|
||||
return ResultReservedValue;
|
||||
}
|
||||
|
||||
handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF);
|
||||
@@ -376,7 +376,7 @@ ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
|
||||
const u32 reserved = flags >> 19;
|
||||
if (reserved != 0) {
|
||||
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
|
||||
return ERR_RESERVED_VALUE;
|
||||
return ResultReservedValue;
|
||||
}
|
||||
|
||||
is_debuggable = (flags & 0x20000) != 0;
|
||||
|
||||
@@ -12,9 +12,7 @@ union ResultCode;
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace Memory {
|
||||
class PageTable;
|
||||
}
|
||||
class KPageTable;
|
||||
|
||||
/// The possible types of programs that may be indicated
|
||||
/// by the program type capability descriptor.
|
||||
@@ -90,7 +88,7 @@ public:
|
||||
/// otherwise, an error code upon failure.
|
||||
///
|
||||
ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
|
||||
Memory::PageTable& page_table);
|
||||
KPageTable& page_table);
|
||||
|
||||
/// Initializes this process capabilities instance for a userland process.
|
||||
///
|
||||
@@ -103,7 +101,7 @@ public:
|
||||
/// otherwise, an error code upon failure.
|
||||
///
|
||||
ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
|
||||
Memory::PageTable& page_table);
|
||||
KPageTable& page_table);
|
||||
|
||||
/// Initializes this process capabilities instance for a process that does not
|
||||
/// have any metadata to parse.
|
||||
@@ -189,7 +187,7 @@ private:
|
||||
/// @return RESULT_SUCCESS if no errors occur, otherwise an error code.
|
||||
///
|
||||
ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
|
||||
Memory::PageTable& page_table);
|
||||
KPageTable& page_table);
|
||||
|
||||
/// Attempts to parse a capability descriptor that is only represented by a
|
||||
/// single flag set.
|
||||
@@ -204,7 +202,7 @@ private:
|
||||
/// @return RESULT_SUCCESS if no errors occurred, otherwise an error code.
|
||||
///
|
||||
ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
|
||||
Memory::PageTable& page_table);
|
||||
KPageTable& page_table);
|
||||
|
||||
/// Clears the internal state of this process capability instance. Necessary,
|
||||
/// to have a sane starting point due to us allowing running executables without
|
||||
@@ -228,10 +226,10 @@ private:
|
||||
ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
|
||||
|
||||
/// Handles flags related to mapping physical memory pages.
|
||||
ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, Memory::PageTable& page_table);
|
||||
ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
|
||||
|
||||
/// Handles flags related to mapping IO pages.
|
||||
ResultCode HandleMapIOFlags(u32 flags, Memory::PageTable& page_table);
|
||||
ResultCode HandleMapIOFlags(u32 flags, KPageTable& page_table);
|
||||
|
||||
/// Handles flags related to the interrupt capability flags.
|
||||
ResultCode HandleInterruptFlags(u32 flags);
|
||||
|
||||
@@ -5,11 +5,11 @@
|
||||
#include <tuple>
|
||||
#include "common/assert.h"
|
||||
#include "core/hle/kernel/client_port.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/server_port.h"
|
||||
#include "core/hle/kernel/server_session.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
@@ -18,7 +18,7 @@ ServerPort::~ServerPort() = default;
|
||||
|
||||
ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
|
||||
if (pending_sessions.empty()) {
|
||||
return ERR_NOT_FOUND;
|
||||
return ResultNotFound;
|
||||
}
|
||||
|
||||
auto session = std::move(pending_sessions.back());
|
||||
|
||||
@@ -4,15 +4,23 @@

#include "common/assert.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"

namespace Kernel {

Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
Session::~Session() = default;
Session::~Session() {
// Release reserved resource when the Session pair was created.
kernel.GetSystemResourceLimit()->Release(LimitableResource::Sessions, 1);
}

Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.GetSystemResourceLimit(),
LimitableResource::Sessions);
ASSERT(session_reservation.Succeeded());
auto session{std::make_shared<Session>(kernel)};
auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};
@@ -21,6 +29,7 @@ Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
session->client = client_session;
session->server = server_session;

session_reservation.Commit();
return std::make_pair(std::move(client_session), std::move(server_session));
}
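
The Session hunk above is the clearest illustration of the KScopedResourceReservation pattern that also appears in Process::LoadFromMetadata earlier in this diff: reserve up front, bail out if the limit is exhausted, and Commit() only once construction has succeeded so the reservation is not returned on destruction. A minimal sketch of that usage shape; the function name and the choice of LimitableResource::Events are hypothetical, while the calls mirror the hunks in this diff.

#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/result.h"

ResultCode MakeSomething(Kernel::KernelCore& kernel) {
    // Reserve one event against the system-wide limit; if we return early, the
    // reservation is handed back automatically when `reservation` is destroyed.
    Kernel::KScopedResourceReservation reservation(kernel.GetSystemResourceLimit(),
                                                   Kernel::LimitableResource::Events);
    if (!reservation.Succeeded()) {
        return Kernel::ResultResourceLimitedExceeded;
    }

    // ... construct the kernel object here ...

    reservation.Commit(); // keep the reservation now that construction has succeeded
    return RESULT_SUCCESS;
}
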
Some files were not shown because too many files have changed in this diff.