Compare commits
289 Commits
mainline-0 ... mainline-0

Commit list: 289 SHA1s (first listed 2b49309d24, last listed c1e9ca4c08); the author, date, and message columns were empty in the compare view.
@@ -1,12 +1,16 @@
#!/bin/bash -ex

# Copy documentation
cp license.txt "$REV_NAME"
cp README.md "$REV_NAME"
cp license.txt "$DIR_NAME"
cp README.md "$DIR_NAME"

tar $COMPRESSION_FLAGS "$ARCHIVE_NAME" "$REV_NAME"
tar -cJvf "${REV_NAME}-source.tar.xz" src externals CMakeLists.txt README.md license.txt
cp "${REV_NAME}-source.tar.xz" "$DIR_NAME"

mv "$REV_NAME" $RELEASE_NAME
tar $COMPRESSION_FLAGS "$ARCHIVE_NAME" "$DIR_NAME"

mv "$DIR_NAME" $RELEASE_NAME
mv "${REV_NAME}-source.tar.xz" $RELEASE_NAME

7z a "$REV_NAME.7z" $RELEASE_NAME
@@ -11,5 +11,4 @@ ninja

ccache -s

# Ignore zlib's tests, since they aren't gated behind a CMake option.
ctest -VV -E "(example|example64)" -C Release
ctest -VV -C Release
@@ -6,9 +6,15 @@ REV_NAME="yuzu-linux-${GITDATE}-${GITREV}"
|
||||
ARCHIVE_NAME="${REV_NAME}.tar.xz"
|
||||
COMPRESSION_FLAGS="-cJvf"
|
||||
|
||||
mkdir "$REV_NAME"
|
||||
if [ "${RELEASE_NAME}" = "mainline" ]; then
|
||||
DIR_NAME="${REV_NAME}"
|
||||
else
|
||||
DIR_NAME="${REV_NAME}_${RELEASE_NAME}"
|
||||
fi
|
||||
|
||||
cp build/bin/yuzu-cmd "$REV_NAME"
|
||||
cp build/bin/yuzu "$REV_NAME"
|
||||
mkdir "$DIR_NAME"
|
||||
|
||||
cp build/bin/yuzu-cmd "$DIR_NAME"
|
||||
cp build/bin/yuzu "$DIR_NAME"
|
||||
|
||||
. .ci/scripts/common/post-upload.sh
|
||||
|
||||
@@ -1,12 +1,23 @@
|
||||
$GITDATE = $(git show -s --date=short --format='%ad') -replace "-",""
|
||||
param($BUILD_NAME)
|
||||
|
||||
$GITDATE = $(git show -s --date=short --format='%ad') -replace "-", ""
|
||||
$GITREV = $(git show -s --format='%h')
|
||||
$RELEASE_DIST = "yuzu-windows-msvc"
|
||||
|
||||
if ("$BUILD_NAME" -eq "mainline") {
|
||||
$RELEASE_DIST = "yuzu-windows-msvc"
|
||||
}
|
||||
else {
|
||||
$RELEASE_DIST = "yuzu-windows-msvc-$BUILD_NAME"
|
||||
}
|
||||
|
||||
$MSVC_BUILD_ZIP = "yuzu-windows-msvc-$GITDATE-$GITREV.zip" -replace " ", ""
|
||||
$MSVC_BUILD_PDB = "yuzu-windows-msvc-$GITDATE-$GITREV-debugsymbols.zip" -replace " ", ""
|
||||
$MSVC_SEVENZIP = "yuzu-windows-msvc-$GITDATE-$GITREV.7z" -replace " ", ""
|
||||
$MSVC_TAR = "yuzu-windows-msvc-$GITDATE-$GITREV.tar" -replace " ", ""
|
||||
$MSVC_TARXZ = "yuzu-windows-msvc-$GITDATE-$GITREV.tar.xz" -replace " ", ""
|
||||
$MSVC_SOURCE = "yuzu-windows-msvc-source-$GITDATE-$GITREV" -replace " ", ""
|
||||
$MSVC_SOURCE_TAR = "$MSVC_SOURCE.tar"
|
||||
$MSVC_SOURCE_TARXZ = "$MSVC_SOURCE_TAR.xz"
|
||||
|
||||
$env:BUILD_ZIP = $MSVC_BUILD_ZIP
|
||||
$env:BUILD_SYMBOLS = $MSVC_BUILD_PDB
|
||||
@@ -14,19 +25,34 @@ $env:BUILD_UPDATE = $MSVC_SEVENZIP
|
||||
|
||||
$BUILD_DIR = ".\build\bin\Release"
|
||||
|
||||
# Upload debugging symbols
|
||||
mkdir pdb
|
||||
Get-ChildItem "$BUILD_DIR\" -Recurse -Filter "*.pdb" | Copy-Item -destination .\pdb
|
||||
7z a -tzip $MSVC_BUILD_PDB .\pdb\*.pdb
|
||||
rm "$BUILD_DIR\*.pdb"
|
||||
|
||||
# Create artifact directories
|
||||
mkdir $RELEASE_DIST
|
||||
mkdir $MSVC_SOURCE
|
||||
mkdir "artifacts"
|
||||
|
||||
# Build a tar.xz for the source of the release
|
||||
Copy-Item .\license.txt -Destination $MSVC_SOURCE
|
||||
Copy-Item .\README.md -Destination $MSVC_SOURCE
|
||||
Copy-Item .\CMakeLists.txt -Destination $MSVC_SOURCE
|
||||
Copy-Item .\src -Recurse -Destination $MSVC_SOURCE
|
||||
Copy-Item .\externals -Recurse -Destination $MSVC_SOURCE
|
||||
Copy-Item .\dist -Recurse -Destination $MSVC_SOURCE
|
||||
Copy-Item .\CMakeModules -Recurse -Destination $MSVC_SOURCE
|
||||
7z a -r -ttar $MSVC_SOURCE_TAR $MSVC_SOURCE
|
||||
7z a -r -txz $MSVC_SOURCE_TARXZ $MSVC_SOURCE_TAR
|
||||
|
||||
# Build the final release artifacts
|
||||
Copy-Item $MSVC_SOURCE_TARXZ -Destination $RELEASE_DIST
|
||||
Copy-Item "$BUILD_DIR\*" -Destination $RELEASE_DIST -Recurse
|
||||
rm "$RELEASE_DIST\*.exe"
|
||||
Get-ChildItem "$BUILD_DIR" -Recurse -Filter "yuzu*.exe" | Copy-Item -destination $RELEASE_DIST
|
||||
Get-ChildItem "$BUILD_DIR" -Recurse -Filter "QtWebEngineProcess*.exe" | Copy-Item -destination $RELEASE_DIST
|
||||
Copy-Item .\license.txt -Destination $RELEASE_DIST
|
||||
Copy-Item .\README.md -Destination $RELEASE_DIST
|
||||
7z a -tzip $MSVC_BUILD_ZIP $RELEASE_DIST\*
|
||||
7z a $MSVC_SEVENZIP $RELEASE_DIST
|
||||
|
||||
@@ -35,4 +61,4 @@ Copy-Item .\README.md -Destination $RELEASE_DIST
|
||||
|
||||
Get-ChildItem . -Filter "*.zip" | Copy-Item -destination "artifacts"
|
||||
Get-ChildItem . -Filter "*.7z" | Copy-Item -destination "artifacts"
|
||||
Get-ChildItem . -Filter "*.tar.xz" | Copy-Item -destination "artifacts"
|
||||
Get-ChildItem . -Filter "*.tar.xz" | Copy-Item -destination "artifacts"
|
||||
|
||||
@@ -6,8 +6,14 @@ REV_NAME="yuzu-windows-mingw-${GITDATE}-${GITREV}"
|
||||
ARCHIVE_NAME="${REV_NAME}.tar.gz"
|
||||
COMPRESSION_FLAGS="-czvf"
|
||||
|
||||
mkdir "$REV_NAME"
|
||||
if [ "${RELEASE_NAME}" = "mainline" ]; then
|
||||
DIR_NAME="${REV_NAME}"
|
||||
else
|
||||
DIR_NAME="${REV_NAME}_${RELEASE_NAME}"
|
||||
fi
|
||||
|
||||
mkdir "$DIR_NAME"
|
||||
# get around the permission issues
|
||||
cp -r package/* "$REV_NAME"
|
||||
cp -r package/* "$DIR_NAME"
|
||||
|
||||
. .ci/scripts/common/post-upload.sh
|
||||
|
||||
@@ -17,6 +17,7 @@ steps:
|
||||
inputs:
|
||||
targetType: 'filePath'
|
||||
filePath: './.ci/scripts/windows/upload.ps1'
|
||||
arguments: '$(BuildName)'
|
||||
- publish: artifacts
|
||||
artifact: 'yuzu-$(BuildName)-windows-msvc'
|
||||
displayName: 'Upload Artifacts'
|
||||
|
||||
@@ -10,6 +10,7 @@ stages:
|
||||
jobs:
|
||||
- job: format
|
||||
displayName: 'clang'
|
||||
continueOnError: true
|
||||
pool:
|
||||
vmImage: ubuntu-latest
|
||||
steps:
|
||||
|
||||
8  .gitmodules (vendored)

@@ -47,8 +47,8 @@
path = externals/sirit
url = https://github.com/ReinUsesLisp/sirit
[submodule "libzip"]
path = externals/libzip
url = https://github.com/DarkLordZach/libzip
path = externals/libzip/libzip
url = https://github.com/nih-at/libzip.git
[submodule "zlib"]
path = externals/zlib
url = https://github.com/madler/zlib
path = externals/zlib/zlib
url = https://github.com/madler/zlib.git
@@ -29,7 +29,7 @@ option(ENABLE_VULKAN "Enables Vulkan backend" ON)

option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)

if(NOT EXISTS ${PROJECT_SOURCE_DIR}/.git/hooks/pre-commit)
if(EXISTS ${PROJECT_SOURCE_DIR}/hooks/pre-commit AND NOT EXISTS ${PROJECT_SOURCE_DIR}/.git/hooks/pre-commit)
message(STATUS "Copying pre-commit hook")
file(COPY hooks/pre-commit
DESTINATION ${PROJECT_SOURCE_DIR}/.git/hooks)
@@ -49,7 +49,10 @@ function(check_submodules_present)
endif()
endforeach()
endfunction()
check_submodules_present()

if(EXISTS ${PROJECT_SOURCE_DIR}/.gitmodules)
check_submodules_present()
endif()

configure_file(${PROJECT_SOURCE_DIR}/dist/compatibility_list/compatibility_list.qrc
${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.qrc
@@ -1 +1 @@
**The Contributor's Guide has moved to [the Yuzu wiki](https://github.com/yuzu-emu/yuzu/wiki/Contributing).**
**The Contributor's Guide has moved to [the yuzu wiki](https://github.com/yuzu-emu/yuzu/wiki/Contributing).**
1  externals/CMakeLists.txt (vendored)

@@ -76,6 +76,7 @@ endif()

# zlib
add_subdirectory(zlib EXCLUDE_FROM_ALL)
set(ZLIB_LIBRARIES z)

# libzip
add_subdirectory(libzip EXCLUDE_FROM_ALL)
2  externals/Vulkan-Headers (vendored)
Submodule externals/Vulkan-Headers updated: fd568d51ed...d42d0747ee
5043  externals/httplib/httplib.h (vendored)
File diff suppressed because it is too large.
1  externals/libzip (vendored)
Submodule externals/libzip deleted from bd7a8103e9
564  externals/libzip/CMakeLists.txt (vendored, new file)
@@ -0,0 +1,564 @@
|
||||
# TODO:
|
||||
# create usable libtool .la file
|
||||
|
||||
CMAKE_MINIMUM_REQUIRED(VERSION 3.0.2)
|
||||
|
||||
LIST(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libzip")
|
||||
|
||||
PROJECT(libzip C)
|
||||
|
||||
OPTION(ENABLE_COMMONCRYPTO "Enable use of CommonCrypto" ON)
|
||||
OPTION(ENABLE_GNUTLS "Enable use of GnuTLS" ON)
|
||||
OPTION(ENABLE_MBEDTLS "Enable use of mbed TLS" ON)
|
||||
OPTION(ENABLE_OPENSSL "Enable use of OpenSSL" ON)
|
||||
OPTION(ENABLE_WINDOWS_CRYPTO "Enable use of Windows cryptography libraries" ON)
|
||||
|
||||
OPTION(ENABLE_BZIP2 "Enable use of BZip2" OFF)
|
||||
OPTION(ENABLE_LZMA "Enable use of LZMA" OFF)
|
||||
|
||||
INCLUDE(CheckFunctionExists)
|
||||
INCLUDE(CheckIncludeFiles)
|
||||
INCLUDE(CheckSymbolExists)
|
||||
INCLUDE(CheckTypeSize)
|
||||
INCLUDE(CheckCSourceRuns)
|
||||
INCLUDE(CheckCSourceCompiles)
|
||||
INCLUDE(CheckStructHasMember)
|
||||
INCLUDE(TestBigEndian)
|
||||
INCLUDE(GNUInstallDirs)
|
||||
IF(ENABLE_COMMONCRYPTO)
|
||||
CHECK_INCLUDE_FILES(CommonCrypto/CommonCrypto.h COMMONCRYPTO_FOUND)
|
||||
ELSE()
|
||||
SET(COMMONCRYPTO_FOUND FALSE)
|
||||
ENDIF()
|
||||
IF(ENABLE_GNUTLS)
|
||||
INCLUDE(FindNettle)
|
||||
INCLUDE(FindGnuTLS)
|
||||
ELSE()
|
||||
SET(GNUTLS_FOUND FALSE)
|
||||
ENDIF()
|
||||
IF(ENABLE_MBEDTLS)
|
||||
FIND_PATH(MBEDTLS_INCLUDE_DIR mbedtls/aes.h)
|
||||
FIND_LIBRARY(MBEDTLS_LIBRARIES NAMES mbedcrypto)
|
||||
ELSE()
|
||||
SET(MBEDTLS_LIBRARIES FALSE)
|
||||
ENDIF()
|
||||
IF(ENABLE_OPENSSL)
|
||||
INCLUDE(FindOpenSSL)
|
||||
ELSE()
|
||||
SET(OPENSSL_FOUND FALSE)
|
||||
ENDIF()
|
||||
IF(WIN32)
|
||||
IF(ENABLE_WINDOWS_CRYPTO)
|
||||
SET(WINDOWS_CRYPTO_FOUND TRUE)
|
||||
ENDIF()
|
||||
ELSE()
|
||||
SET(WINDOWS_CRYPTO_FOUND FALSE)
|
||||
ENDIF()
|
||||
|
||||
OPTION(BUILD_SHARED_LIBS "Build shared libraries" ON)
|
||||
OPTION(SHARED_LIB_VERSIONNING "Add SO version in .so build" ON)
|
||||
|
||||
SET(PACKAGE "libzip")
|
||||
SET(PACKAGE_NAME ${PACKAGE})
|
||||
SET(PACKAGE_VERSION_MAJOR "1")
|
||||
SET(PACKAGE_VERSION_MINOR "5")
|
||||
SET(PACKAGE_VERSION_MICRO "2a")
|
||||
#SET(VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}")
|
||||
SET(VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_MICRO}")
|
||||
SET(PACKAGE_VERSION ${VERSION})
|
||||
SET(LIBZIP_VERSION ${PACKAGE_VERSION})
|
||||
SET(LIBZIP_VERSION_MAJOR ${PACKAGE_VERSION_MAJOR})
|
||||
SET(LIBZIP_VERSION_MINOR ${PACKAGE_VERSION_MINOR})
|
||||
SET(LIBZIP_VERSION_MICRO ${PACKAGE_VERSION_MICRO})
|
||||
SET(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
|
||||
|
||||
SET(ARCHIVE_NAME ${PACKAGE_NAME}-${PACKAGE_VERSION})
|
||||
IF(NOT TARGET dist)
|
||||
ADD_CUSTOM_TARGET(dist
|
||||
COMMAND git config tar.tar.xz.command "xz -c"
|
||||
COMMAND git archive --prefix=${ARCHIVE_NAME}/ -o ${ARCHIVE_NAME}.tar.gz HEAD
|
||||
COMMAND git archive --prefix=${ARCHIVE_NAME}/ -o ${ARCHIVE_NAME}.tar.xz HEAD
|
||||
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
|
||||
)
|
||||
ADD_CUSTOM_TARGET(distcheck
|
||||
COMMAND chmod -R u+w ${ARCHIVE_NAME} ${ARCHIVE_NAME}-build ${ARCHIVE_NAME}-dest 2>/dev/null || true
|
||||
COMMAND rm -rf ${ARCHIVE_NAME} ${ARCHIVE_NAME}-build ${ARCHIVE_NAME}-dest
|
||||
COMMAND cmake -E tar xf ${ARCHIVE_NAME}.tar.gz
|
||||
COMMAND chmod -R u-w ${ARCHIVE_NAME}
|
||||
COMMAND mkdir ${ARCHIVE_NAME}-build
|
||||
COMMAND mkdir ${ARCHIVE_NAME}-dest
|
||||
COMMAND cd ${ARCHIVE_NAME}-build && cmake -DCMAKE_INSTALL_PREFIX=../${ARCHIVE_NAME}-dest ../${ARCHIVE_NAME}
|
||||
COMMAND cd ${ARCHIVE_NAME}-build && make -j4
|
||||
COMMAND cd ${ARCHIVE_NAME}-build && make test
|
||||
COMMAND cd ${ARCHIVE_NAME}-build && make install
|
||||
# COMMAND cd ${ARCHIVE_NAME}-build && make uninstall
|
||||
# COMMAND if [ `find ${ARCHIVE_NAME}-dest ! -type d | wc -l` -ne 0 ]; then echo leftover files in ${ARCHIVE_NAME}-dest; false; fi
|
||||
COMMAND cd ${ARCHIVE_NAME}-build && make clean
|
||||
COMMAND chmod -R u+w ${ARCHIVE_NAME} ${ARCHIVE_NAME}-build ${ARCHIVE_NAME}-dest
|
||||
COMMAND rm -rf ${ARCHIVE_NAME} ${ARCHIVE_NAME}-build ${ARCHIVE_NAME}-dest
|
||||
COMMAND echo "${ARCHIVE_NAME}.tar.gz is ready for distribution."
|
||||
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
|
||||
)
|
||||
ADD_DEPENDENCIES(distcheck dist)
|
||||
ENDIF(NOT TARGET dist)
|
||||
|
||||
IF(BUILD_SHARED_LIBS)
|
||||
SET(HAVE_SHARED TRUE)
|
||||
ELSE()
|
||||
SET(ZIP_STATIC TRUE)
|
||||
ENDIF()
|
||||
|
||||
# Checks
|
||||
|
||||
CHECK_FUNCTION_EXISTS(_chmod HAVE__CHMOD)
|
||||
CHECK_FUNCTION_EXISTS(_close HAVE__CLOSE)
|
||||
CHECK_FUNCTION_EXISTS(_dup HAVE__DUP)
|
||||
CHECK_FUNCTION_EXISTS(_fdopen HAVE__FDOPEN)
|
||||
CHECK_FUNCTION_EXISTS(_fileno HAVE__FILENO)
|
||||
CHECK_FUNCTION_EXISTS(_open HAVE__OPEN)
|
||||
CHECK_FUNCTION_EXISTS(_setmode HAVE__SETMODE)
|
||||
CHECK_FUNCTION_EXISTS(_snprintf HAVE__SNPRINTF)
|
||||
CHECK_FUNCTION_EXISTS(_strdup HAVE__STRDUP)
|
||||
CHECK_FUNCTION_EXISTS(_stricmp HAVE__STRICMP)
|
||||
CHECK_FUNCTION_EXISTS(_strtoi64 HAVE__STRTOI64)
|
||||
CHECK_FUNCTION_EXISTS(_strtoui64 HAVE__STRTOUI64)
|
||||
CHECK_FUNCTION_EXISTS(_unlink HAVE__UNLINK)
|
||||
CHECK_FUNCTION_EXISTS(arc4random HAVE_ARC4RANDOM)
|
||||
CHECK_FUNCTION_EXISTS(clonefile HAVE_CLONEFILE)
|
||||
CHECK_FUNCTION_EXISTS(explicit_bzero HAVE_EXPLICIT_BZERO)
|
||||
CHECK_FUNCTION_EXISTS(explicit_memset HAVE_EXPLICIT_MEMSET)
|
||||
CHECK_FUNCTION_EXISTS(fileno HAVE_FILENO)
|
||||
CHECK_FUNCTION_EXISTS(fseeko HAVE_FSEEKO)
|
||||
CHECK_FUNCTION_EXISTS(ftello HAVE_FTELLO)
|
||||
CHECK_FUNCTION_EXISTS(getprogname HAVE_GETPROGNAME)
|
||||
CHECK_FUNCTION_EXISTS(localtime_r HAVE_LOCALTIME_R)
|
||||
CHECK_FUNCTION_EXISTS(open HAVE_OPEN)
|
||||
CHECK_FUNCTION_EXISTS(setmode HAVE_SETMODE)
|
||||
CHECK_FUNCTION_EXISTS(snprintf HAVE_SNPRINTF)
|
||||
CHECK_FUNCTION_EXISTS(strcasecmp HAVE_STRCASECMP)
|
||||
CHECK_FUNCTION_EXISTS(strdup HAVE_STRDUP)
|
||||
CHECK_FUNCTION_EXISTS(stricmp HAVE_STRICMP)
|
||||
CHECK_FUNCTION_EXISTS(strtoll HAVE_STRTOLL)
|
||||
CHECK_FUNCTION_EXISTS(strtoull HAVE_STRTOULL)
|
||||
|
||||
CHECK_INCLUDE_FILES("sys/types.h;sys/stat.h;fts.h" HAVE_FTS_H)
|
||||
CHECK_INCLUDE_FILES(stdbool.h HAVE_STDBOOL_H)
|
||||
CHECK_INCLUDE_FILES(strings.h HAVE_STRINGS_H)
|
||||
CHECK_INCLUDE_FILES(unistd.h HAVE_UNISTD_H)
|
||||
|
||||
CHECK_INCLUDE_FILES(inttypes.h HAVE_INTTYPES_H_LIBZIP)
|
||||
CHECK_INCLUDE_FILES(stdint.h HAVE_STDINT_H_LIBZIP)
|
||||
CHECK_INCLUDE_FILES(sys/types.h HAVE_SYS_TYPES_H_LIBZIP)
|
||||
|
||||
# TODO: fix test
|
||||
# this test does not find __progname even when it exists
|
||||
#CHECK_SYMBOL_EXISTS(__progname stdlib.h HAVE___PROGNAME)
|
||||
|
||||
CHECK_TYPE_SIZE(__int8 __INT8_LIBZIP)
|
||||
CHECK_TYPE_SIZE(int8_t INT8_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE(uint8_t UINT8_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE(__int16 __INT16_LIBZIP)
|
||||
CHECK_TYPE_SIZE(int16_t INT16_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE(uint16_t UINT16_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE(__int32 __INT32_LIBZIP)
|
||||
CHECK_TYPE_SIZE(int32_t INT32_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE(uint32_t UINT32_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE(__int64 __INT64_LIBZIP)
|
||||
CHECK_TYPE_SIZE(int64_t INT64_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE(uint64_t UINT64_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE("short" SHORT_LIBZIP)
|
||||
CHECK_TYPE_SIZE("int" INT_LIBZIP)
|
||||
CHECK_TYPE_SIZE("long" LONG_LIBZIP)
|
||||
CHECK_TYPE_SIZE("long long" LONG_LONG_LIBZIP)
|
||||
CHECK_TYPE_SIZE("off_t" SIZEOF_OFF_T)
|
||||
CHECK_TYPE_SIZE("size_t" SIZE_T_LIBZIP)
|
||||
CHECK_TYPE_SIZE("ssize_t" SSIZE_T_LIBZIP)
|
||||
|
||||
CHECK_C_SOURCE_COMPILES("#include <sys/ioctl.h>
|
||||
#include <linux/fs.h>
|
||||
int main(int argc, char *argv[]) { unsigned long x = FICLONERANGE; }" HAVE_FICLONERANGE)
|
||||
|
||||
CHECK_C_SOURCE_COMPILES("
|
||||
int foo(char * _Nullable bar);
|
||||
int main(int argc, char *argv[]) { }" HAVE_NULLABLE)
|
||||
|
||||
TEST_BIG_ENDIAN(WORDS_BIGENDIAN)
|
||||
|
||||
#FIND_PACKAGE(ZLIB 1.1.2 REQUIRED)
|
||||
INCLUDE_DIRECTORIES(../zlib/zlib)
|
||||
SET(CMAKE_REQUIRED_INCLUDES ../zlib/zlib)
|
||||
|
||||
IF(ENABLE_BZIP2)
|
||||
FIND_PACKAGE(BZip2)
|
||||
IF(BZIP2_FOUND)
|
||||
SET (HAVE_LIBBZ2 1)
|
||||
|
||||
INCLUDE_DIRECTORIES(${BZIP2_INCLUDE_DIR})
|
||||
SET (OPTIONAL_LIBRARY ${OPTIONAL_LIBRARY} ${BZIP2_LIBRARIES})
|
||||
ELSE()
|
||||
MESSAGE(WARNING "-- bzip2 library not found; bzip2 support disabled")
|
||||
ENDIF(BZIP2_FOUND)
|
||||
ENDIF(ENABLE_BZIP2)
|
||||
|
||||
IF(ENABLE_LZMA)
|
||||
FIND_PACKAGE(LibLZMA)
|
||||
IF(LIBLZMA_FOUND)
|
||||
SET (HAVE_LIBLZMA 1)
|
||||
|
||||
INCLUDE_DIRECTORIES(${LIBLZMA_INCLUDE_DIR})
|
||||
SET (OPTIONAL_LIBRARY ${OPTIONAL_LIBRARY} ${LIBLZMA_LIBRARY})
|
||||
ELSE()
|
||||
MESSAGE(WARNING "-- lzma library not found; lzma support disabled")
|
||||
ENDIF(LIBLZMA_FOUND)
|
||||
ENDIF(ENABLE_LZMA)
|
||||
|
||||
|
||||
IF (COMMONCRYPTO_FOUND)
|
||||
SET (HAVE_CRYPTO 1)
|
||||
SET (HAVE_COMMONCRYPTO 1)
|
||||
ELSEIF (WINDOWS_CRYPTO_FOUND)
|
||||
SET (HAVE_CRYPTO 1)
|
||||
SET (HAVE_WINDOWS_CRYPTO 1)
|
||||
ELSEIF (GNUTLS_FOUND AND NETTLE_FOUND)
|
||||
SET (HAVE_CRYPTO 1)
|
||||
SET (HAVE_GNUTLS 1)
|
||||
INCLUDE_DIRECTORIES(${GNUTLS_INCLUDE_DIR} ${NETTLE_INCLUDE_DIR})
|
||||
SET (OPTIONAL_LIBRARY ${OPTIONAL_LIBRARY} ${GNUTLS_LIBRARY} ${NETTLE_LIBRARY})
|
||||
ELSEIF (OPENSSL_FOUND)
|
||||
SET (HAVE_CRYPTO 1)
|
||||
SET (HAVE_OPENSSL 1)
|
||||
INCLUDE_DIRECTORIES(${OPENSSL_INCLUDE_DIR})
|
||||
SET (OPTIONAL_LIBRARY ${OPTIONAL_LIBRARY} ${OPENSSL_LIBRARIES})
|
||||
ELSEIF (MBEDTLS_LIBRARIES)
|
||||
SET (HAVE_CRYPTO 1)
|
||||
SET (HAVE_MBEDTLS 1)
|
||||
INCLUDE_DIRECTORIES(${MBEDTLS_INCLUDE_DIR})
|
||||
SET (OPTIONAL_LIBRARY ${OPTIONAL_LIBRARY} ${MBEDTLS_LIBRARIES})
|
||||
ENDIF()
|
||||
|
||||
IF (NOT HAVE_CRYPTO)
|
||||
MESSAGE(WARNING "-- neither Common Crypto, GnuTLS, mbed TLS, OpenSSL, nor Windows Cryptography found; AES support disabled")
|
||||
ENDIF()
|
||||
|
||||
IF(MSVC)
|
||||
ADD_DEFINITIONS("-D_CRT_SECURE_NO_WARNINGS")
|
||||
ADD_DEFINITIONS("-D_CRT_NONSTDC_NO_DEPRECATE")
|
||||
ENDIF(MSVC)
|
||||
|
||||
if(WIN32)
|
||||
if(HAVE_WINDOWS_CRYPTO)
|
||||
SET (OPTIONAL_LIBRARY ${OPTIONAL_LIBRARY} bcrypt)
|
||||
endif()
|
||||
if(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
|
||||
ADD_DEFINITIONS(-DMS_UWP)
|
||||
else(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
|
||||
SET (OPTIONAL_LIBRARY ${OPTIONAL_LIBRARY} advapi32)
|
||||
endif(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
|
||||
endif(WIN32)
|
||||
|
||||
ADD_DEFINITIONS("-DHAVE_CONFIG_H")
|
||||
|
||||
# rpath handling: use rpath in installed binaries
|
||||
IF(NOT CMAKE_SYSTEM_NAME MATCHES Linux)
|
||||
SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
|
||||
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
|
||||
ENDIF()
|
||||
|
||||
# fixed size integral types
|
||||
|
||||
IF(HAVE_INTTYPES_H_LIBZIP)
|
||||
SET(LIBZIP_TYPES_INCLUDE "#define __STDC_FORMAT_MACROS 1
|
||||
#include <inttypes.h>")
|
||||
ELSEIF(HAVE_STDINT_H_LIBZIP)
|
||||
SET(LIBZIP_TYPES_INCLUDE "#include <stdint.h>")
|
||||
ELSEIF(HAVE_SYS_TYPES_H_LIBZIP)
|
||||
SET(LIBZIP_TYPES_INCLUDE "#include <sys/types.h>")
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_INT8_T_LIBZIP)
|
||||
SET(ZIP_INT8_T int8_t)
|
||||
ELSEIF(HAVE___INT8_LIBZIP)
|
||||
SET(ZIP_INT8_T __int8)
|
||||
ELSE()
|
||||
SET(ZIP_INT8_T "signed char")
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_UINT8_T_LIBZIP)
|
||||
SET(ZIP_UINT8_T uint8_t)
|
||||
ELSEIF(HAVE___INT8_LIBZIP)
|
||||
SET(ZIP_UINT8_T "unsigned __int8")
|
||||
ELSE()
|
||||
SET(ZIP_UINT8_T "unsigned char")
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_INT16_T_LIBZIP)
|
||||
SET(ZIP_INT16_T int16_t)
|
||||
ELSEIF(HAVE___INT16_LIBZIP)
|
||||
SET(INT16_T_LIBZIP __int16)
|
||||
ELSEIF(SHORT_LIBZIP EQUAL 2)
|
||||
SET(INT16_T_LIBZIP short)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_UINT16_T_LIBZIP)
|
||||
SET(ZIP_UINT16_T uint16_t)
|
||||
ELSEIF(HAVE___INT16_LIBZIP)
|
||||
SET(UINT16_T_LIBZIP "unsigned __int16")
|
||||
ELSEIF(SHORT_LIBZIP EQUAL 2)
|
||||
SET(UINT16_T_LIBZIP "unsigned short")
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_INT32_T_LIBZIP)
|
||||
SET(ZIP_INT32_T int32_t)
|
||||
ELSEIF(HAVE___INT32_LIBZIP)
|
||||
SET(ZIP_INT32_T __int32)
|
||||
ELSEIF(INT_LIBZIP EQUAL 4)
|
||||
SET(ZIP_INT32_T int)
|
||||
ELSEIF(LONG_LIBZIP EQUAL 4)
|
||||
SET(ZIP_INT32_T long)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_UINT32_T_LIBZIP)
|
||||
SET(ZIP_UINT32_T uint32_t)
|
||||
ELSEIF(HAVE___INT32_LIBZIP)
|
||||
SET(ZIP_UINT32_T "unsigned __int32")
|
||||
ELSEIF(INT_LIBZIP EQUAL 4)
|
||||
SET(ZIP_UINT32_T "unsigned int")
|
||||
ELSEIF(LONG_LIBZIP EQUAL 4)
|
||||
SET(ZIP_UINT32_T "unsigned long")
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_INT64_T_LIBZIP)
|
||||
SET(ZIP_INT64_T int64_t)
|
||||
ELSEIF(HAVE___INT64_LIBZIP)
|
||||
SET(ZIP_INT64_T __int64)
|
||||
ELSEIF(LONG_LIBZIP EQUAL 8)
|
||||
SET(ZIP_INT64_T long)
|
||||
ELSEIF(LONG_LONG_LIBZIP EQUAL 8)
|
||||
SET(ZIP_INT64_T "long long")
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_UINT64_T_LIBZIP)
|
||||
SET(ZIP_UINT64_T uint64_t)
|
||||
ELSEIF(HAVE___INT64_LIBZIP)
|
||||
SET(ZIP_UINT64_T "unsigned __int64")
|
||||
ELSEIF(LONG_LIBZIP EQUAL 8)
|
||||
SET(ZIP_UINT64_T "unsigned long")
|
||||
ELSEIF(LONG_LONG_LIBZIP EQUAL 8)
|
||||
SET(ZIP_UINT64_T "unsigned long long")
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_NULLABLE)
|
||||
SET(ZIP_NULLABLE_DEFINES)
|
||||
ELSE()
|
||||
SET(ZIP_NULLABLE_DEFINES "#define _Nullable
|
||||
#define _Nonnull")
|
||||
ENDIF()
|
||||
|
||||
# write out config file
|
||||
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/libzip/cmake-config.h.in ${CMAKE_CURRENT_BINARY_DIR}/libzip/config.h)
|
||||
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/libzip/cmake-zipconf.h.in ${CMAKE_CURRENT_BINARY_DIR}/libzip/zipconf.h)
|
||||
|
||||
# installation
|
||||
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/libzip/zipconf.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
|
||||
INSTALL(FILES libzip/lib/zip.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
|
||||
|
||||
SET(CMAKE_C_VISIBILITY_PRESET hidden)
|
||||
|
||||
ADD_LIBRARY(zip
|
||||
libzip/lib/zip_add.c
|
||||
libzip/lib/zip_add_dir.c
|
||||
libzip/lib/zip_add_entry.c
|
||||
libzip/lib/zip_algorithm_deflate.c
|
||||
libzip/lib/zip_buffer.c
|
||||
libzip/lib/zip_close.c
|
||||
libzip/lib/zip_delete.c
|
||||
libzip/lib/zip_dir_add.c
|
||||
libzip/lib/zip_dirent.c
|
||||
libzip/lib/zip_discard.c
|
||||
libzip/lib/zip_entry.c
|
||||
libzip/lib/zip_err_str.c
|
||||
libzip/lib/zip_error.c
|
||||
libzip/lib/zip_error_clear.c
|
||||
libzip/lib/zip_error_get.c
|
||||
libzip/lib/zip_error_get_sys_type.c
|
||||
libzip/lib/zip_error_strerror.c
|
||||
libzip/lib/zip_error_to_str.c
|
||||
libzip/lib/zip_extra_field.c
|
||||
libzip/lib/zip_extra_field_api.c
|
||||
libzip/lib/zip_fclose.c
|
||||
libzip/lib/zip_fdopen.c
|
||||
libzip/lib/zip_file_add.c
|
||||
libzip/lib/zip_file_error_clear.c
|
||||
libzip/lib/zip_file_error_get.c
|
||||
libzip/lib/zip_file_get_comment.c
|
||||
libzip/lib/zip_file_get_external_attributes.c
|
||||
libzip/lib/zip_file_get_offset.c
|
||||
libzip/lib/zip_file_rename.c
|
||||
libzip/lib/zip_file_replace.c
|
||||
libzip/lib/zip_file_set_comment.c
|
||||
libzip/lib/zip_file_set_encryption.c
|
||||
libzip/lib/zip_file_set_external_attributes.c
|
||||
libzip/lib/zip_file_set_mtime.c
|
||||
libzip/lib/zip_file_strerror.c
|
||||
libzip/lib/zip_filerange_crc.c
|
||||
libzip/lib/zip_fopen.c
|
||||
libzip/lib/zip_fopen_encrypted.c
|
||||
libzip/lib/zip_fopen_index.c
|
||||
libzip/lib/zip_fopen_index_encrypted.c
|
||||
libzip/lib/zip_fread.c
|
||||
libzip/lib/zip_fseek.c
|
||||
libzip/lib/zip_ftell.c
|
||||
libzip/lib/zip_get_archive_comment.c
|
||||
libzip/lib/zip_get_archive_flag.c
|
||||
libzip/lib/zip_get_encryption_implementation.c
|
||||
libzip/lib/zip_get_file_comment.c
|
||||
libzip/lib/zip_get_name.c
|
||||
libzip/lib/zip_get_num_entries.c
|
||||
libzip/lib/zip_get_num_files.c
|
||||
libzip/lib/zip_hash.c
|
||||
libzip/lib/zip_io_util.c
|
||||
libzip/lib/zip_libzip_version.c
|
||||
libzip/lib/zip_memdup.c
|
||||
libzip/lib/zip_name_locate.c
|
||||
libzip/lib/zip_new.c
|
||||
libzip/lib/zip_open.c
|
||||
libzip/lib/zip_progress.c
|
||||
libzip/lib/zip_rename.c
|
||||
libzip/lib/zip_replace.c
|
||||
libzip/lib/zip_set_archive_comment.c
|
||||
libzip/lib/zip_set_archive_flag.c
|
||||
libzip/lib/zip_set_default_password.c
|
||||
libzip/lib/zip_set_file_comment.c
|
||||
libzip/lib/zip_set_file_compression.c
|
||||
libzip/lib/zip_set_name.c
|
||||
libzip/lib/zip_source_accept_empty.c
|
||||
libzip/lib/zip_source_begin_write.c
|
||||
libzip/lib/zip_source_begin_write_cloning.c
|
||||
libzip/lib/zip_source_buffer.c
|
||||
libzip/lib/zip_source_call.c
|
||||
libzip/lib/zip_source_close.c
|
||||
libzip/lib/zip_source_commit_write.c
|
||||
libzip/lib/zip_source_compress.c
|
||||
libzip/lib/zip_source_crc.c
|
||||
libzip/lib/zip_source_error.c
|
||||
libzip/lib/zip_source_filep.c
|
||||
libzip/lib/zip_source_free.c
|
||||
libzip/lib/zip_source_function.c
|
||||
libzip/lib/zip_source_get_compression_flags.c
|
||||
libzip/lib/zip_source_is_deleted.c
|
||||
libzip/lib/zip_source_layered.c
|
||||
libzip/lib/zip_source_open.c
|
||||
libzip/lib/zip_source_pkware.c
|
||||
libzip/lib/zip_source_read.c
|
||||
libzip/lib/zip_source_remove.c
|
||||
libzip/lib/zip_source_rollback_write.c
|
||||
libzip/lib/zip_source_seek.c
|
||||
libzip/lib/zip_source_seek_write.c
|
||||
libzip/lib/zip_source_stat.c
|
||||
libzip/lib/zip_source_supports.c
|
||||
libzip/lib/zip_source_tell.c
|
||||
libzip/lib/zip_source_tell_write.c
|
||||
libzip/lib/zip_source_window.c
|
||||
libzip/lib/zip_source_write.c
|
||||
libzip/lib/zip_source_zip.c
|
||||
libzip/lib/zip_source_zip_new.c
|
||||
libzip/lib/zip_stat.c
|
||||
libzip/lib/zip_stat_index.c
|
||||
libzip/lib/zip_stat_init.c
|
||||
libzip/lib/zip_strerror.c
|
||||
libzip/lib/zip_string.c
|
||||
libzip/lib/zip_unchange.c
|
||||
libzip/lib/zip_unchange_all.c
|
||||
libzip/lib/zip_unchange_archive.c
|
||||
libzip/lib/zip_unchange_data.c
|
||||
libzip/lib/zip_utf-8.c
|
||||
)
|
||||
|
||||
IF(WIN32)
|
||||
target_sources(zip PRIVATE
|
||||
libzip/lib/zip_source_win32handle.c
|
||||
libzip/lib/zip_source_win32utf8.c
|
||||
libzip/lib/zip_source_win32w.c
|
||||
)
|
||||
IF(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
|
||||
ELSE()
|
||||
target_sources(zip PRIVATE libzip/lib/zip_source_win32a.c)
|
||||
ENDIF()
|
||||
ELSE()
|
||||
target_sources(zip PRIVATE
|
||||
libzip/lib/zip_mkstempm.c
|
||||
libzip/lib/zip_source_file.c
|
||||
libzip/lib/zip_random_unix.c
|
||||
)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_LIBBZ2)
|
||||
target_sources(zip PRIVATE libzip/lib/zip_algorithm_bzip2.c)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_LIBLZMA)
|
||||
target_sources(zip PRIVATE libzip/lib/zip_algorithm_xz.c)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_COMMONCRYPTO)
|
||||
target_sources(zip PRIVATE libzip/lib/zip_crypto_commoncrypto.c)
|
||||
ELSEIF(HAVE_WINDOWS_CRYPTO)
|
||||
target_sources(zip PRIVATE libzip/lib/zip_crypto_win.c)
|
||||
ELSEIF(HAVE_GNUTLS)
|
||||
target_sources(zip PRIVATE libzip/lib/zip_crypto_gnutls.c)
|
||||
ELSEIF(HAVE_OPENSSL)
|
||||
target_sources(zip PRIVATE libzip/lib/zip_crypto_openssl.c)
|
||||
ELSEIF(HAVE_MBEDTLS)
|
||||
target_sources(zip PRIVATE libzip/lib/zip_crypto_mbedtls.c)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_CRYPTO)
|
||||
target_sources(zip PRIVATE
|
||||
libzip/lib/zip_winzip_aes.c
|
||||
libzip/lib/zip_source_winzip_aes_decode.c
|
||||
libzip/lib/zip_source_winzip_aes_encode.c
|
||||
)
|
||||
ENDIF()
|
||||
|
||||
target_include_directories(zip
|
||||
PUBLIC
|
||||
libzip/lib
|
||||
${CMAKE_CURRENT_BINARY_DIR}/libzip
|
||||
)
|
||||
|
||||
# pkgconfig file
|
||||
SET(prefix ${CMAKE_INSTALL_PREFIX})
|
||||
SET(exec_prefix \${prefix})
|
||||
SET(bindir \${exec_prefix}/${CMAKE_INSTALL_BINDIR})
|
||||
SET(libdir \${exec_prefix}/${CMAKE_INSTALL_LIBDIR})
|
||||
SET(includedir \${prefix}/${CMAKE_INSTALL_INCLUDEDIR})
|
||||
IF(CMAKE_SYSTEM_NAME MATCHES BSD)
|
||||
SET(PKG_CONFIG_RPATH "-Wl,-R\${libdir}")
|
||||
ENDIF(CMAKE_SYSTEM_NAME MATCHES BSD)
|
||||
get_target_property(LIBS_PRIVATE zip LINK_LIBRARIES)
|
||||
foreach(LIB ${LIBS_PRIVATE})
|
||||
if(LIB MATCHES "^/")
|
||||
get_filename_component(LIB ${LIB} NAME_WE)
|
||||
string(REGEX REPLACE "^lib" "" LIB ${LIB})
|
||||
endif()
|
||||
set(LIBS "${LIBS} -l${LIB}")
|
||||
endforeach()
|
||||
CONFIGURE_FILE(libzip/libzip.pc.in libzip/libzip.pc @ONLY)
|
||||
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/libzip.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
|
||||
|
||||
ADD_CUSTOM_TARGET(update_zip_err_str
|
||||
COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/libzip/lib/make_zip_err_str.sh ${CMAKE_CURRENT_SOURCE_DIR}/libzip/lib/zip.h ${CMAKE_CURRENT_SOURCE_DIR}/libzip/lib/zip_err_str.c
|
||||
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/libzip/lib/zip.h ${CMAKE_CURRENT_SOURCE_DIR}/libzip/lib/make_zip_err_str.sh
|
||||
)
|
||||
|
||||
IF(SHARED_LIB_VERSIONNING)
|
||||
SET_TARGET_PROPERTIES(zip PROPERTIES VERSION 5.0 SOVERSION 5)
|
||||
ENDIF()
|
||||
|
||||
TARGET_LINK_LIBRARIES(zip ${ZLIB_LIBRARIES} ${OPTIONAL_LIBRARY})
|
||||
INSTALL(TARGETS zip
|
||||
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
|
||||
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
|
||||
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
|
||||
)
|
||||
|
||||
1  externals/libzip/libzip (vendored submodule)
Submodule externals/libzip/libzip added at 89bd6d63bd
6  externals/microprofile/microprofile.h (vendored)

@@ -814,7 +814,7 @@ struct MicroProfile

inline int MicroProfileLogType(MicroProfileLogEntry Index)
{
return ((MP_LOG_BEGIN_MASK & Index)>>62) & 0x3;
return (int)(((MP_LOG_BEGIN_MASK & Index)>>62) & 0x3ULL);
}

inline uint64_t MicroProfileLogTimerIndex(MicroProfileLogEntry Index)
@@ -861,12 +861,12 @@ T MicroProfileMax(T a, T b)

inline int64_t MicroProfileMsToTick(float fMs, int64_t nTicksPerSecond)
{
return (int64_t)(fMs*0.001f*nTicksPerSecond);
return (int64_t)(fMs*0.001f*(float)nTicksPerSecond);
}

inline float MicroProfileTickToMsMultiplier(int64_t nTicksPerSecond)
{
return 1000.f / nTicksPerSecond;
return 1000.f / (float)nTicksPerSecond;
}

inline uint16_t MicroProfileGetGroupIndex(MicroProfileToken t)
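The two hunks above only add explicit casts; behaviour is unchanged. For context, this is the kind of implicit int64_t-to-float conversion that MSVC reports (typically as C4244) and that the core target later in this diff starts treating as an error. A minimal standalone sketch, with invented function names:

```cpp
#include <cstdint>

// Implicit int64_t -> float conversion in the division: MSVC reports this
// (typically C4244, "possible loss of data"); with /we4244 it becomes an error.
inline float TickToMsMultiplierWarns(std::int64_t ticks_per_second) {
    return 1000.f / ticks_per_second;
}

// Same computation with the conversion written out, mirroring the cast added above.
inline float TickToMsMultiplierClean(std::int64_t ticks_per_second) {
    return 1000.f / static_cast<float>(ticks_per_second);
}

int main() {
    return TickToMsMultiplierWarns(1000) == TickToMsMultiplierClean(1000) ? 0 : 1;
}
```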
2  externals/sirit (vendored)
Submodule externals/sirit updated: f7c4b07a7e...12f40a8032
81  externals/zlib/CMakeLists.txt (vendored, new file)
@@ -0,0 +1,81 @@
|
||||
project(zlib C)
|
||||
|
||||
include(CheckTypeSize)
|
||||
include(CheckFunctionExists)
|
||||
include(CheckIncludeFile)
|
||||
|
||||
check_include_file(sys/types.h HAVE_SYS_TYPES_H)
|
||||
check_include_file(stdint.h HAVE_STDINT_H)
|
||||
check_include_file(stddef.h HAVE_STDDEF_H)
|
||||
|
||||
# Check to see if we have large file support
|
||||
set(CMAKE_REQUIRED_DEFINITIONS -D_LARGEFILE64_SOURCE=1)
|
||||
# We add these other definitions here because CheckTypeSize.cmake
|
||||
# in CMake 2.4.x does not automatically do so and we want
|
||||
# compatibility with CMake 2.4.x.
|
||||
if(HAVE_SYS_TYPES_H)
|
||||
list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_SYS_TYPES_H)
|
||||
endif()
|
||||
if(HAVE_STDINT_H)
|
||||
list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_STDINT_H)
|
||||
endif()
|
||||
if(HAVE_STDDEF_H)
|
||||
list(APPEND CMAKE_REQUIRED_DEFINITIONS -DHAVE_STDDEF_H)
|
||||
endif()
|
||||
check_type_size(off64_t OFF64_T)
|
||||
if(HAVE_OFF64_T)
|
||||
add_definitions(-D_LARGEFILE64_SOURCE=1)
|
||||
endif()
|
||||
set(CMAKE_REQUIRED_DEFINITIONS) # clear variable
|
||||
|
||||
# Check for fseeko
|
||||
check_function_exists(fseeko HAVE_FSEEKO)
|
||||
if(NOT HAVE_FSEEKO)
|
||||
add_definitions(-DNO_FSEEKO)
|
||||
endif()
|
||||
|
||||
# Check for unistd.h
|
||||
check_include_file(unistd.h HAVE_UNISTD_H)
|
||||
if(HAVE_UNISTD_H)
|
||||
add_definitions(-DHAVE_UNISTD_H)
|
||||
endif()
|
||||
|
||||
if(MSVC)
|
||||
add_definitions(-D_CRT_SECURE_NO_DEPRECATE)
|
||||
add_definitions(-D_CRT_NONSTDC_NO_DEPRECATE)
|
||||
endif()
|
||||
|
||||
add_library(z STATIC
|
||||
zlib/adler32.c
|
||||
zlib/compress.c
|
||||
zlib/crc32.c
|
||||
zlib/crc32.h
|
||||
zlib/deflate.c
|
||||
zlib/deflate.h
|
||||
zlib/gzclose.c
|
||||
zlib/gzguts.h
|
||||
zlib/gzlib.c
|
||||
zlib/gzread.c
|
||||
zlib/gzwrite.c
|
||||
zlib/inffast.h
|
||||
zlib/inffixed.h
|
||||
zlib/inflate.c
|
||||
zlib/inflate.h
|
||||
zlib/infback.c
|
||||
zlib/inftrees.c
|
||||
zlib/inftrees.h
|
||||
zlib/inffast.c
|
||||
zlib/trees.c
|
||||
zlib/trees.h
|
||||
zlib/uncompr.c
|
||||
zlib/zconf.h
|
||||
zlib/zlib.h
|
||||
zlib/zutil.c
|
||||
zlib/zutil.h
|
||||
)
|
||||
add_library(ZLIB::ZLIB ALIAS z)
|
||||
|
||||
target_include_directories(z
|
||||
PUBLIC
|
||||
zlib/
|
||||
)
|
||||
0  externals/zlib → externals/zlib/zlib (vendored)
@@ -36,9 +36,9 @@ public:
|
||||
}
|
||||
|
||||
void SetWaveIndex(std::size_t index);
|
||||
std::vector<s16> DequeueSamples(std::size_t sample_count);
|
||||
std::vector<s16> DequeueSamples(std::size_t sample_count, Memory::Memory& memory);
|
||||
void UpdateState();
|
||||
void RefreshBuffer();
|
||||
void RefreshBuffer(Memory::Memory& memory);
|
||||
|
||||
private:
|
||||
bool is_in_use{};
|
||||
@@ -66,17 +66,18 @@ public:
|
||||
return info;
|
||||
}
|
||||
|
||||
void UpdateState();
|
||||
void UpdateState(Memory::Memory& memory);
|
||||
|
||||
private:
|
||||
EffectOutStatus out_status{};
|
||||
EffectInStatus info{};
|
||||
};
|
||||
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, AudioRendererParameter params,
|
||||
Kernel::SharedPtr<Kernel::WritableEvent> buffer_event,
|
||||
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_,
|
||||
AudioRendererParameter params,
|
||||
std::shared_ptr<Kernel::WritableEvent> buffer_event,
|
||||
std::size_t instance_number)
|
||||
: worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count),
|
||||
effects(params.effect_count) {
|
||||
effects(params.effect_count), memory{memory_} {
|
||||
|
||||
audio_out = std::make_unique<AudioCore::AudioOut>();
|
||||
stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS,
|
||||
@@ -162,7 +163,7 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_
|
||||
}
|
||||
|
||||
for (auto& effect : effects) {
|
||||
effect.UpdateState();
|
||||
effect.UpdateState(memory);
|
||||
}
|
||||
|
||||
// Release previous buffers and queue next ones for playback
|
||||
@@ -206,13 +207,14 @@ void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) {
|
||||
is_refresh_pending = true;
|
||||
}
|
||||
|
||||
std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_count) {
|
||||
std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_count,
|
||||
Memory::Memory& memory) {
|
||||
if (!IsPlaying()) {
|
||||
return {};
|
||||
}
|
||||
|
||||
if (is_refresh_pending) {
|
||||
RefreshBuffer();
|
||||
RefreshBuffer(memory);
|
||||
}
|
||||
|
||||
const std::size_t max_size{samples.size() - offset};
|
||||
@@ -256,10 +258,11 @@ void AudioRenderer::VoiceState::UpdateState() {
|
||||
is_in_use = info.is_in_use;
|
||||
}
|
||||
|
||||
void AudioRenderer::VoiceState::RefreshBuffer() {
|
||||
std::vector<s16> new_samples(info.wave_buffer[wave_index].buffer_sz / sizeof(s16));
|
||||
Memory::ReadBlock(info.wave_buffer[wave_index].buffer_addr, new_samples.data(),
|
||||
info.wave_buffer[wave_index].buffer_sz);
|
||||
void AudioRenderer::VoiceState::RefreshBuffer(Memory::Memory& memory) {
|
||||
const auto wave_buffer_address = info.wave_buffer[wave_index].buffer_addr;
|
||||
const auto wave_buffer_size = info.wave_buffer[wave_index].buffer_sz;
|
||||
std::vector<s16> new_samples(wave_buffer_size / sizeof(s16));
|
||||
memory.ReadBlock(wave_buffer_address, new_samples.data(), wave_buffer_size);
|
||||
|
||||
switch (static_cast<Codec::PcmFormat>(info.sample_format)) {
|
||||
case Codec::PcmFormat::Int16: {
|
||||
@@ -269,7 +272,7 @@ void AudioRenderer::VoiceState::RefreshBuffer() {
|
||||
case Codec::PcmFormat::Adpcm: {
|
||||
// Decode ADPCM to PCM16
|
||||
Codec::ADPCM_Coeff coeffs;
|
||||
Memory::ReadBlock(info.additional_params_addr, coeffs.data(), sizeof(Codec::ADPCM_Coeff));
|
||||
memory.ReadBlock(info.additional_params_addr, coeffs.data(), sizeof(Codec::ADPCM_Coeff));
|
||||
new_samples = Codec::DecodeADPCM(reinterpret_cast<u8*>(new_samples.data()),
|
||||
new_samples.size() * sizeof(s16), coeffs, adpcm_state);
|
||||
break;
|
||||
@@ -307,18 +310,18 @@ void AudioRenderer::VoiceState::RefreshBuffer() {
|
||||
is_refresh_pending = false;
|
||||
}
|
||||
|
||||
void AudioRenderer::EffectState::UpdateState() {
|
||||
void AudioRenderer::EffectState::UpdateState(Memory::Memory& memory) {
|
||||
if (info.is_new) {
|
||||
out_status.state = EffectStatus::New;
|
||||
} else {
|
||||
if (info.type == Effect::Aux) {
|
||||
ASSERT_MSG(Memory::Read32(info.aux_info.return_buffer_info) == 0,
|
||||
ASSERT_MSG(memory.Read32(info.aux_info.return_buffer_info) == 0,
|
||||
"Aux buffers tried to update");
|
||||
ASSERT_MSG(Memory::Read32(info.aux_info.send_buffer_info) == 0,
|
||||
ASSERT_MSG(memory.Read32(info.aux_info.send_buffer_info) == 0,
|
||||
"Aux buffers tried to update");
|
||||
ASSERT_MSG(Memory::Read32(info.aux_info.return_buffer_base) == 0,
|
||||
ASSERT_MSG(memory.Read32(info.aux_info.return_buffer_base) == 0,
|
||||
"Aux buffers tried to update");
|
||||
ASSERT_MSG(Memory::Read32(info.aux_info.send_buffer_base) == 0,
|
||||
ASSERT_MSG(memory.Read32(info.aux_info.send_buffer_base) == 0,
|
||||
"Aux buffers tried to update");
|
||||
}
|
||||
}
|
||||
@@ -340,7 +343,7 @@ void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
|
||||
std::size_t offset{};
|
||||
s64 samples_remaining{BUFFER_SIZE};
|
||||
while (samples_remaining > 0) {
|
||||
const std::vector<s16> samples{voice.DequeueSamples(samples_remaining)};
|
||||
const std::vector<s16> samples{voice.DequeueSamples(samples_remaining, memory)};
|
||||
|
||||
if (samples.empty()) {
|
||||
break;
|
||||
|
||||
@@ -22,6 +22,10 @@ namespace Kernel {
|
||||
class WritableEvent;
|
||||
}
|
||||
|
||||
namespace Memory {
|
||||
class Memory;
|
||||
}
|
||||
|
||||
namespace AudioCore {
|
||||
|
||||
class AudioOut;
|
||||
@@ -217,9 +221,9 @@ static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size
|
||||
|
||||
class AudioRenderer {
|
||||
public:
|
||||
AudioRenderer(Core::Timing::CoreTiming& core_timing, AudioRendererParameter params,
|
||||
Kernel::SharedPtr<Kernel::WritableEvent> buffer_event,
|
||||
std::size_t instance_number);
|
||||
AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_,
|
||||
AudioRendererParameter params,
|
||||
std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number);
|
||||
~AudioRenderer();
|
||||
|
||||
std::vector<u8> UpdateAudioRenderer(const std::vector<u8>& input_params);
|
||||
@@ -235,11 +239,12 @@ private:
|
||||
class VoiceState;
|
||||
|
||||
AudioRendererParameter worker_params;
|
||||
Kernel::SharedPtr<Kernel::WritableEvent> buffer_event;
|
||||
std::shared_ptr<Kernel::WritableEvent> buffer_event;
|
||||
std::vector<VoiceState> voices;
|
||||
std::vector<EffectState> effects;
|
||||
std::unique_ptr<AudioOut> audio_out;
|
||||
AudioCore::StreamPtr stream;
|
||||
StreamPtr stream;
|
||||
Memory::Memory& memory;
|
||||
};
|
||||
|
||||
} // namespace AudioCore
|
||||
|
||||
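The audio_core hunks above all apply one mechanical change: static Memory::Read32/ReadBlock calls are replaced by calls on a Memory::Memory& that is threaded through the constructor and the affected method signatures. A rough sketch of that dependency-injection shape, using hypothetical class and method bodies (only the Read32/ReadBlock names mirror the real interface):

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for Memory::Memory: an instance owns its address space instead of
// exposing free functions that touch a global.
class Memory {
public:
    explicit Memory(std::vector<std::uint8_t> backing) : backing_{std::move(backing)} {}

    void ReadBlock(std::size_t addr, void* dest, std::size_t size) const {
        std::memcpy(dest, backing_.data() + addr, size);
    }

    std::uint32_t Read32(std::size_t addr) const {
        std::uint32_t value{};
        ReadBlock(addr, &value, sizeof(value));
        return value;
    }

private:
    std::vector<std::uint8_t> backing_;
};

// Consumers keep a reference handed in at construction time (as the new
// AudioRenderer constructor does), rather than calling global helpers.
class VoiceStateSketch {
public:
    explicit VoiceStateSketch(Memory& memory) : memory_{memory} {}

    std::uint32_t ReadSampleWord(std::size_t addr) const {
        return memory_.Read32(addr);
    }

private:
    Memory& memory_;
};

int main() {
    Memory memory{{0x4D, 0x4F, 0x44, 0x30}}; // the bytes "MOD0", little-endian
    VoiceStateSketch voice{memory};
    return voice.ReadSampleWord(0) == 0x30444F4D ? 0 : 1;
}
```

Passing the Memory instance explicitly is what lets the later hunks drop the global Memory::* free functions without changing any read or write semantics.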
@@ -37,7 +37,7 @@ Stream::Stream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, Format fo
|
||||
: sample_rate{sample_rate}, format{format}, release_callback{std::move(release_callback)},
|
||||
sink_stream{sink_stream}, core_timing{core_timing}, name{std::move(name_)} {
|
||||
|
||||
release_event = core_timing.RegisterEvent(
|
||||
release_event = Core::Timing::CreateEvent(
|
||||
name, [this](u64 userdata, s64 cycles_late) { ReleaseActiveBuffer(); });
|
||||
}
|
||||
|
||||
|
||||
@@ -98,18 +98,19 @@ private:
|
||||
/// Gets the number of core cycles when the specified buffer will be released
|
||||
s64 GetBufferReleaseCycles(const Buffer& buffer) const;
|
||||
|
||||
u32 sample_rate; ///< Sample rate of the stream
|
||||
Format format; ///< Format of the stream
|
||||
float game_volume = 1.0f; ///< The volume the game currently has set
|
||||
ReleaseCallback release_callback; ///< Buffer release callback for the stream
|
||||
State state{State::Stopped}; ///< Playback state of the stream
|
||||
Core::Timing::EventType* release_event{}; ///< Core timing release event for the stream
|
||||
BufferPtr active_buffer; ///< Actively playing buffer in the stream
|
||||
std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream
|
||||
std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream
|
||||
SinkStream& sink_stream; ///< Output sink for the stream
|
||||
Core::Timing::CoreTiming& core_timing; ///< Core timing instance.
|
||||
std::string name; ///< Name of the stream, must be unique
|
||||
u32 sample_rate; ///< Sample rate of the stream
|
||||
Format format; ///< Format of the stream
|
||||
float game_volume = 1.0f; ///< The volume the game currently has set
|
||||
ReleaseCallback release_callback; ///< Buffer release callback for the stream
|
||||
State state{State::Stopped}; ///< Playback state of the stream
|
||||
std::shared_ptr<Core::Timing::EventType>
|
||||
release_event; ///< Core timing release event for the stream
|
||||
BufferPtr active_buffer; ///< Actively playing buffer in the stream
|
||||
std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream
|
||||
std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream
|
||||
SinkStream& sink_stream; ///< Output sink for the stream
|
||||
Core::Timing::CoreTiming& core_timing; ///< Core timing instance.
|
||||
std::string name; ///< Name of the stream, must be unique
|
||||
};
|
||||
|
||||
using StreamPtr = std::shared_ptr<Stream>;
|
||||
|
||||
@@ -3,17 +3,8 @@
|
||||
# could affect the result, but much more unlikely than the following files. Keeping a list of files
|
||||
# like this allows for much better caching since it doesn't force the user to recompile binary shaders every update
|
||||
set(VIDEO_CORE "${CMAKE_SOURCE_DIR}/src/video_core")
|
||||
if (DEFINED ENV{CI})
|
||||
if (DEFINED ENV{TRAVIS})
|
||||
set(BUILD_REPOSITORY $ENV{TRAVIS_REPO_SLUG})
|
||||
set(BUILD_TAG $ENV{TRAVIS_TAG})
|
||||
elseif(DEFINED ENV{APPVEYOR})
|
||||
set(BUILD_REPOSITORY $ENV{APPVEYOR_REPO_NAME})
|
||||
set(BUILD_TAG $ENV{APPVEYOR_REPO_TAG_NAME})
|
||||
elseif(DEFINED ENV{AZURE})
|
||||
set(BUILD_REPOSITORY $ENV{AZURE_REPO_NAME})
|
||||
set(BUILD_TAG $ENV{AZURE_REPO_TAG})
|
||||
endif()
|
||||
if (DEFINED ENV{AZURECIREPO})
|
||||
set(BUILD_REPOSITORY $ENV{AZURECIREPO})
|
||||
endif()
|
||||
if (DEFINED ENV{TITLEBARFORMATIDLE})
|
||||
set(TITLE_BAR_FORMAT_IDLE $ENV{TITLEBARFORMATIDLE})
|
||||
|
||||
@@ -28,21 +28,19 @@ __declspec(noinline, noreturn)
}

#define ASSERT(_a_) \
do \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
while (0)
if (!(_a_)) { \
LOG_CRITICAL(Debug, "Assertion Failed!"); \
}

#define ASSERT_MSG(_a_, ...) \
do \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
} \
while (0)
if (!(_a_)) { \
LOG_CRITICAL(Debug, "Assertion Failed! " __VA_ARGS__); \
}

#define UNREACHABLE() ASSERT_MSG(false, "Unreachable code!")
#define UNREACHABLE_MSG(...) ASSERT_MSG(false, __VA_ARGS__)
#define UNREACHABLE() \
{ LOG_CRITICAL(Debug, "Unreachable code!"); }
#define UNREACHABLE_MSG(...) \
{ LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); }

#ifdef _DEBUG
#define DEBUG_ASSERT(_a_) ASSERT(_a_)
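The rewritten ASSERT/ASSERT_MSG drop both the noreturn assert_noinline_call wrapper (a failed assertion now logs and execution continues) and the do { ... } while (0) wrapper. The do/while idiom exists so a statement-like macro composes safely with surrounding if/else; a small illustration of what changes without it, using invented macro names rather than the project's:

```cpp
#include <cstdio>

// Old style: statement-like macro, safe to use anywhere a statement is allowed.
#define CHECK_DOWHILE(cond)                \
    do                                     \
        if (!(cond)) {                     \
            std::puts("check failed");     \
        }                                  \
    while (0)

// New style (as in the diff above): a bare if with braces. The braces stop a
// following else from silently binding to the macro's if, but
// `CHECK_BARE(x); else ...` no longer compiles, because the trailing
// semicolon ends the outer if before the else is reached.
#define CHECK_BARE(cond)                   \
    if (!(cond)) {                         \
        std::puts("check failed");         \
    }

int main(int argc, char**) {
    if (argc > 0)
        CHECK_DOWHILE(argc == 1); // fine: expands to a single statement
    else
        std::puts("unreachable");

    // if (argc > 0)
    //     CHECK_BARE(argc == 1); // would not compile: "else without a previous if"
    // else
    //     std::puts("unreachable");

    CHECK_BARE(argc == 1); // fine as a plain statement
    return 0;
}
```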
@@ -36,6 +36,13 @@
#include "common/common_funcs.h"
#include "common/swap.h"

// Inlining
#ifdef _WIN32
#define FORCE_INLINE __forceinline
#else
#define FORCE_INLINE inline __attribute__((always_inline))
#endif

/*
 * Abstract bitfield class
 *
@@ -168,11 +175,11 @@ public:
constexpr BitField(BitField&&) noexcept = default;
constexpr BitField& operator=(BitField&&) noexcept = default;

constexpr FORCE_INLINE operator T() const {
constexpr operator T() const {
return Value();
}

constexpr FORCE_INLINE void Assign(const T& value) {
constexpr void Assign(const T& value) {
storage = (static_cast<StorageType>(storage) & ~mask) | FormatValue(value);
}
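For context on the operator T() and Assign() members touched above: BitField is normally placed inside a union over a raw storage word, so named fields read and write bit slices of it. A simplified, self-contained sketch of that usage. MiniBitField is a stand-in written for this note, not the real common/bit_field.h implementation, and the register layout is invented; like the real class, it relies on the union type-punning that the supported compilers permit.

```cpp
#include <cstdint>
#include <cstdio>

// Minimal stand-in for BitField<Position, Bits, T>: just enough to show what
// operator T() and Assign() are for.
template <std::size_t Position, std::size_t Bits, typename T>
struct MiniBitField {
    T storage;

    static constexpr T mask = ((T{1} << Bits) - 1) << Position;

    constexpr operator T() const { // read: extract the field
        return (storage & mask) >> Position;
    }
    constexpr void Assign(T value) { // write: splice the field back in
        storage = (storage & ~mask) | ((value << Position) & mask);
    }
};

// Typical usage: a union over one raw word, with named views of bit ranges.
union TextureControl {
    std::uint32_t raw;
    MiniBitField<0, 4, std::uint32_t> format;
    MiniBitField<4, 2, std::uint32_t> filter;
    MiniBitField<6, 1, std::uint32_t> enabled;
};

int main() {
    TextureControl control{0};
    control.enabled.Assign(1);
    control.filter.Assign(2);
    control.format.Assign(0xA);
    std::printf("raw=0x%08X format=%u\n", control.raw,
                static_cast<std::uint32_t>(control.format));
    return 0;
}
```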
@@ -1,10 +1,11 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <algorithm>
#include <array>
#include <string>

#if !defined(ARCHITECTURE_x86_64)
@@ -16,18 +17,17 @@
#define CONCAT2(x, y) DO_CONCAT2(x, y)
#define DO_CONCAT2(x, y) x##y

// helper macro to properly align structure members.
// Calling INSERT_PADDING_BYTES will add a new member variable with a name like "pad121",
// depending on the current source line to make sure variable names are unique.
#define INSERT_PADDING_BYTES(num_bytes) u8 CONCAT2(pad, __LINE__)[(num_bytes)]
#define INSERT_PADDING_WORDS(num_words) u32 CONCAT2(pad, __LINE__)[(num_words)]
/// Helper macros to insert unused bytes or words to properly align structs. These values will be
/// zero-initialized.
#define INSERT_PADDING_BYTES(num_bytes) \
std::array<u8, num_bytes> CONCAT2(pad, __LINE__) {}
#define INSERT_PADDING_WORDS(num_words) \
std::array<u32, num_words> CONCAT2(pad, __LINE__) {}

// Inlining
#ifdef _WIN32
#define FORCE_INLINE __forceinline
#else
#define FORCE_INLINE inline __attribute__((always_inline))
#endif
/// These are similar to the INSERT_PADDING_* macros, but are needed for padding unions. This is
/// because unions can only be initialized by one member.
#define INSERT_UNION_PADDING_BYTES(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__)
#define INSERT_UNION_PADDING_WORDS(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__)

#ifndef _MSC_VER
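The new comment above gives the reason for the second pair of macros: INSERT_PADDING_* now carries a {} default member initializer, and a union may give a default member initializer to at most one of its members, so padding placed inside a union has to omit it. A compact sketch of both cases, with invented struct and union names:

```cpp
#include <array>
#include <cstdint>

#define CONCAT2_IMPL(x, y) x##y
#define CONCAT2(x, y) CONCAT2_IMPL(x, y)

// Struct padding: zero-initialized, mirroring the new INSERT_PADDING_* macros.
#define PAD_BYTES(n) std::array<std::uint8_t, n> CONCAT2(pad, __LINE__){}
// Union padding: no {} initializer, mirroring INSERT_UNION_PADDING_BYTES.
#define PAD_BYTES_UNION(n) std::array<std::uint8_t, n> CONCAT2(pad, __LINE__)

struct RegsSketch {
    std::uint32_t control;
    PAD_BYTES(12); // fine: every struct member may have a default initializer
    std::uint32_t status;
};

union PacketSketch {
    std::uint32_t raw{}; // the one union member allowed a default initializer
    PAD_BYTES_UNION(4);  // must not carry {}: at most one member of a union
                         // may have a default member initializer
};

int main() {
    RegsSketch regs{};
    PacketSketch packet{};
    return static_cast<int>(regs.control + packet.raw);
}
```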
@@ -58,7 +58,7 @@ std::string GetLastErrorMsg();
namespace Common {

constexpr u32 MakeMagic(char a, char b, char c, char d) {
return a | b << 8 | c << 16 | d << 24;
return u32(a) | u32(b) << 8 | u32(c) << 16 | u32(d) << 24;
}

} // namespace Common
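The only change to MakeMagic is performing the arithmetic in u32 so the shifts and ors never pass through signed int, which is what the signed/unsigned conversion warnings enabled elsewhere in this diff flag. For reference, the value it builds is a little-endian FourCC, for example the 'MOD0' tag that the symbol-loading code later in this diff checks for:

```cpp
#include <cstdint>

using u32 = std::uint32_t;

constexpr u32 MakeMagic(char a, char b, char c, char d) {
    return u32(a) | u32(b) << 8 | u32(c) << 16 | u32(d) << 24;
}

// 'M' = 0x4D, 'O' = 0x4F, 'D' = 0x44, '0' = 0x30, packed lowest byte first,
// i.e. the integer read back from the bytes "MOD0" on a little-endian machine.
static_assert(MakeMagic('M', 'O', 'D', '0') == 0x30444F4Du);

int main() {
    return 0;
}
```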
@@ -35,41 +35,6 @@ static inline u64 ComputeStructHash64(const T& data) {
|
||||
return ComputeHash64(&data, sizeof(data));
|
||||
}
|
||||
|
||||
/// A helper template that ensures the padding in a struct is initialized by memsetting to 0.
|
||||
template <typename T>
|
||||
struct HashableStruct {
|
||||
// In addition to being trivially copyable, T must also have a trivial default constructor,
|
||||
// because any member initialization would be overridden by memset
|
||||
static_assert(std::is_trivial_v<T>, "Type passed to HashableStruct must be trivial");
|
||||
/*
|
||||
* We use a union because "implicitly-defined copy/move constructor for a union X copies the
|
||||
* object representation of X." and "implicitly-defined copy assignment operator for a union X
|
||||
* copies the object representation (3.9) of X." = Bytewise copy instead of memberwise copy.
|
||||
* This is important because the padding bytes are included in the hash and comparison between
|
||||
* objects.
|
||||
*/
|
||||
union {
|
||||
T state;
|
||||
};
|
||||
|
||||
HashableStruct() {
|
||||
// Memset structure to zero padding bits, so that they will be deterministic when hashing
|
||||
std::memset(&state, 0, sizeof(T));
|
||||
}
|
||||
|
||||
bool operator==(const HashableStruct<T>& o) const {
|
||||
return std::memcmp(&state, &o.state, sizeof(T)) == 0;
|
||||
};
|
||||
|
||||
bool operator!=(const HashableStruct<T>& o) const {
|
||||
return !(*this == o);
|
||||
};
|
||||
|
||||
std::size_t Hash() const {
|
||||
return Common::ComputeStructHash64(state);
|
||||
}
|
||||
};
|
||||
|
||||
struct PairHash {
|
||||
template <class T1, class T2>
|
||||
std::size_t operator()(const std::pair<T1, T2>& pair) const noexcept {
|
||||
|
||||
@@ -272,8 +272,10 @@ const char* GetLogClassName(Class log_class) {
|
||||
#undef CLS
|
||||
#undef SUB
|
||||
case Class::Count:
|
||||
UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return "Invalid";
|
||||
}
|
||||
|
||||
const char* GetLevelName(Level log_level) {
|
||||
@@ -288,9 +290,11 @@ const char* GetLevelName(Level log_level) {
|
||||
LVL(Error);
|
||||
LVL(Critical);
|
||||
case Level::Count:
|
||||
UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
#undef LVL
|
||||
UNREACHABLE();
|
||||
return "Invalid";
|
||||
}
|
||||
|
||||
void SetGlobalFilter(const Filter& filter) {
|
||||
|
||||
@@ -46,9 +46,19 @@ public:
ElementPtr* new_ptr = new ElementPtr();
write_ptr->next.store(new_ptr, std::memory_order_release);
write_ptr = new_ptr;
cv.notify_one();

const size_t previous_size{size};
++size;

// Acquire the mutex and then immediately release it as a fence.
// TODO(bunnei): This can be replaced with C++20 waitable atomics when properly supported.
// See discussion on https://github.com/yuzu-emu/yuzu/pull/3173 for details.
if (previous_size == 0) {
std::lock_guard lock{cv_mutex};
cv.notify_one();
} else {
cv.notify_one();
}
}

void Pop() {
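The interesting part of this Push() change is the branch on previous_size: when the queue goes from empty to non-empty, the producer briefly takes cv_mutex before notifying, so the notify is ordered after a consumer's empty-check-then-wait and cannot be lost. A self-contained sketch of the waiting side this pairs with; the cv/cv_mutex/size names follow the snippet above, but the PopWait body is illustrative rather than the project's exact code:

```cpp
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

// Minimal producer/consumer pair showing the handshake. The real queue stores
// elements; a counter is enough here.
std::atomic_size_t size{0};
std::condition_variable cv;
std::mutex cv_mutex;

void Push() {
    const std::size_t previous_size = size.fetch_add(1);
    if (previous_size == 0) {
        // Empty -> non-empty transition: the consumer may be between its
        // emptiness check and cv.wait(). Notifying while holding the lock
        // orders this wakeup after that check, so it cannot be missed.
        std::lock_guard lock{cv_mutex};
        cv.notify_one();
    } else {
        cv.notify_one();
    }
}

void PopWait() {
    std::unique_lock lock{cv_mutex};
    cv.wait(lock, [] { return size.load() != 0; });
    size.fetch_sub(1);
}

int main() {
    std::thread consumer(PopWait);
    Push();
    consumer.join();
    std::puts("woke up exactly once");
    return 0;
}
```

The else branch presumably keeps the cheap, lock-free notify on the common path where the queue was already non-empty and no consumer is about to block.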
@@ -170,6 +170,7 @@ add_library(core STATIC
hle/kernel/server_port.h
hle/kernel/server_session.cpp
hle/kernel/server_session.h
hle/kernel/session.cpp
hle/kernel/session.h
hle/kernel/shared_memory.cpp
hle/kernel/shared_memory.h
@@ -509,7 +510,6 @@ add_library(core STATIC
memory/dmnt_cheat_vm.h
memory.cpp
memory.h
memory_setup.h
perf_stats.cpp
perf_stats.h
reporter.cpp
@@ -522,6 +522,23 @@ add_library(core STATIC
tools/freezer.h
)

if (MSVC)
target_compile_options(core PRIVATE
# 'expression' : signed/unsigned mismatch
/we4018
# 'argument' : conversion from 'type1' to 'type2', possible loss of data (floating-point)
/we4244
# 'conversion' : conversion from 'type1' to 'type2', signed/unsigned mismatch
/we4245
# 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4254
# 'var' : conversion from 'size_t' to 'type', possible loss of data
/we4267
# 'context' : truncation from 'type1' to 'type2'
/we4305
)
endif()

create_target_directory_groups(core)

target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
@@ -13,7 +13,6 @@
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Core {
|
||||
|
||||
namespace {
|
||||
|
||||
constexpr u64 ELF_DYNAMIC_TAG_NULL = 0;
|
||||
@@ -61,15 +60,15 @@ static_assert(sizeof(ELFSymbol) == 0x18, "ELFSymbol has incorrect size.");
|
||||
|
||||
using Symbols = std::vector<std::pair<ELFSymbol, std::string>>;
|
||||
|
||||
Symbols GetSymbols(VAddr text_offset) {
|
||||
const auto mod_offset = text_offset + Memory::Read32(text_offset + 4);
|
||||
Symbols GetSymbols(VAddr text_offset, Memory::Memory& memory) {
|
||||
const auto mod_offset = text_offset + memory.Read32(text_offset + 4);
|
||||
|
||||
if (mod_offset < text_offset || (mod_offset & 0b11) != 0 ||
|
||||
Memory::Read32(mod_offset) != Common::MakeMagic('M', 'O', 'D', '0')) {
|
||||
memory.Read32(mod_offset) != Common::MakeMagic('M', 'O', 'D', '0')) {
|
||||
return {};
|
||||
}
|
||||
|
||||
const auto dynamic_offset = Memory::Read32(mod_offset + 0x4) + mod_offset;
|
||||
const auto dynamic_offset = memory.Read32(mod_offset + 0x4) + mod_offset;
|
||||
|
||||
VAddr string_table_offset{};
|
||||
VAddr symbol_table_offset{};
|
||||
@@ -77,8 +76,8 @@ Symbols GetSymbols(VAddr text_offset) {
|
||||
|
||||
VAddr dynamic_index = dynamic_offset;
|
||||
while (true) {
|
||||
const auto tag = Memory::Read64(dynamic_index);
|
||||
const auto value = Memory::Read64(dynamic_index + 0x8);
|
||||
const u64 tag = memory.Read64(dynamic_index);
|
||||
const u64 value = memory.Read64(dynamic_index + 0x8);
|
||||
dynamic_index += 0x10;
|
||||
|
||||
if (tag == ELF_DYNAMIC_TAG_NULL) {
|
||||
@@ -106,11 +105,11 @@ Symbols GetSymbols(VAddr text_offset) {
|
||||
VAddr symbol_index = symbol_table_address;
|
||||
while (symbol_index < string_table_address) {
|
||||
ELFSymbol symbol{};
|
||||
Memory::ReadBlock(symbol_index, &symbol, sizeof(ELFSymbol));
|
||||
memory.ReadBlock(symbol_index, &symbol, sizeof(ELFSymbol));
|
||||
|
||||
VAddr string_offset = string_table_address + symbol.name_index;
|
||||
std::string name;
|
||||
for (u8 c = Memory::Read8(string_offset); c != 0; c = Memory::Read8(++string_offset)) {
|
||||
for (u8 c = memory.Read8(string_offset); c != 0; c = memory.Read8(++string_offset)) {
|
||||
name += static_cast<char>(c);
|
||||
}
|
||||
|
||||
@@ -142,28 +141,28 @@ constexpr u64 SEGMENT_BASE = 0x7100000000ull;

std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const {
std::vector<BacktraceEntry> out;
auto& memory = system.Memory();

auto fp = GetReg(29);
auto lr = GetReg(30);

while (true) {
out.push_back({"", 0, lr, 0});
if (!fp) {
break;
}
lr = Memory::Read64(fp + 8) - 4;
fp = Memory::Read64(fp);
lr = memory.Read64(fp + 8) - 4;
fp = memory.Read64(fp);
}

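The loop follows the standard AArch64 frame-record chain: fp (x29) points at a pair of 64-bit values, [fp] holding the previous frame record and [fp + 8] the saved link register; subtracting 4 steps the return address back to the call instruction. A minimal sketch of the same walk against an abstract reader (the callable is an illustrative stand-in for the memory interface):

#include <cstdint>
#include <vector>

// Walks an AArch64 frame-pointer chain:
//   [fp + 0] -> previous frame record, [fp + 8] -> saved lr.
template <typename Read64>
std::vector<std::uint64_t> WalkFrameRecords(std::uint64_t fp, std::uint64_t lr, Read64 read64) {
    std::vector<std::uint64_t> return_addresses;
    while (true) {
        return_addresses.push_back(lr);
        if (fp == 0) {
            break; // a null frame pointer terminates the chain
        }
        lr = read64(fp + 8) - 4; // step back from the return address to the call site
        fp = read64(fp);         // follow the link to the caller's frame record
    }
    return return_addresses;
}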
std::map<VAddr, std::string> modules;
|
||||
auto& loader{System::GetInstance().GetAppLoader()};
|
||||
auto& loader{system.GetAppLoader()};
|
||||
if (loader.ReadNSOModules(modules) != Loader::ResultStatus::Success) {
|
||||
return {};
|
||||
}
|
||||
|
||||
std::map<std::string, Symbols> symbols;
|
||||
for (const auto& module : modules) {
|
||||
symbols.insert_or_assign(module.second, GetSymbols(module.first));
|
||||
symbols.insert_or_assign(module.second, GetSymbols(module.first, memory));
|
||||
}
|
||||
|
||||
for (auto& entry : out) {
|
||||
|
||||
@@ -17,11 +17,13 @@ enum class VMAPermission : u8;
|
||||
}
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
|
||||
/// Generic ARMv8 CPU interface
|
||||
class ARM_Interface : NonCopyable {
|
||||
public:
|
||||
virtual ~ARM_Interface() {}
|
||||
explicit ARM_Interface(System& system_) : system{system_} {}
|
||||
virtual ~ARM_Interface() = default;
|
||||
|
||||
struct ThreadContext {
|
||||
std::array<u64, 31> cpu_registers;
|
||||
@@ -163,6 +165,10 @@ public:
|
||||
/// fp+0 : pointer to previous frame record
|
||||
/// fp+8 : value of lr for frame
|
||||
void LogBacktrace() const;
|
||||
|
||||
protected:
|
||||
/// System context that this ARM interface is running under.
|
||||
System& system;
|
||||
};
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -28,36 +28,38 @@ public:
|
||||
explicit ARM_Dynarmic_Callbacks(ARM_Dynarmic& parent) : parent(parent) {}
|
||||
|
||||
u8 MemoryRead8(u64 vaddr) override {
|
||||
return Memory::Read8(vaddr);
|
||||
return parent.system.Memory().Read8(vaddr);
|
||||
}
|
||||
u16 MemoryRead16(u64 vaddr) override {
|
||||
return Memory::Read16(vaddr);
|
||||
return parent.system.Memory().Read16(vaddr);
|
||||
}
|
||||
u32 MemoryRead32(u64 vaddr) override {
|
||||
return Memory::Read32(vaddr);
|
||||
return parent.system.Memory().Read32(vaddr);
|
||||
}
|
||||
u64 MemoryRead64(u64 vaddr) override {
|
||||
return Memory::Read64(vaddr);
|
||||
return parent.system.Memory().Read64(vaddr);
|
||||
}
|
||||
Vector MemoryRead128(u64 vaddr) override {
|
||||
return {Memory::Read64(vaddr), Memory::Read64(vaddr + 8)};
|
||||
auto& memory = parent.system.Memory();
|
||||
return {memory.Read64(vaddr), memory.Read64(vaddr + 8)};
|
||||
}
|
||||
|
||||
void MemoryWrite8(u64 vaddr, u8 value) override {
|
||||
Memory::Write8(vaddr, value);
|
||||
parent.system.Memory().Write8(vaddr, value);
|
||||
}
|
||||
void MemoryWrite16(u64 vaddr, u16 value) override {
|
||||
Memory::Write16(vaddr, value);
|
||||
parent.system.Memory().Write16(vaddr, value);
|
||||
}
|
||||
void MemoryWrite32(u64 vaddr, u32 value) override {
|
||||
Memory::Write32(vaddr, value);
|
||||
parent.system.Memory().Write32(vaddr, value);
|
||||
}
|
||||
void MemoryWrite64(u64 vaddr, u64 value) override {
|
||||
Memory::Write64(vaddr, value);
|
||||
parent.system.Memory().Write64(vaddr, value);
|
||||
}
|
||||
void MemoryWrite128(u64 vaddr, Vector value) override {
|
||||
Memory::Write64(vaddr, value[0]);
|
||||
Memory::Write64(vaddr + 8, value[1]);
|
||||
auto& memory = parent.system.Memory();
|
||||
memory.Write64(vaddr, value[0]);
|
||||
memory.Write64(vaddr + 8, value[1]);
|
||||
}
|
||||
|
||||
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
|
||||
@@ -67,7 +69,7 @@ public:
|
||||
ARM_Interface::ThreadContext ctx;
|
||||
parent.SaveContext(ctx);
|
||||
parent.inner_unicorn.LoadContext(ctx);
|
||||
parent.inner_unicorn.ExecuteInstructions(static_cast<int>(num_instructions));
|
||||
parent.inner_unicorn.ExecuteInstructions(num_instructions);
|
||||
parent.inner_unicorn.SaveContext(ctx);
|
||||
parent.LoadContext(ctx);
|
||||
num_interpreted_instructions += num_instructions;
|
||||
@@ -171,9 +173,10 @@ void ARM_Dynarmic::Step() {
|
||||
|
||||
ARM_Dynarmic::ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor,
|
||||
std::size_t core_index)
|
||||
: cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), inner_unicorn{system},
|
||||
core_index{core_index}, system{system},
|
||||
exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
|
||||
: ARM_Interface{system},
|
||||
cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), inner_unicorn{system},
|
||||
core_index{core_index}, exclusive_monitor{
|
||||
dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
|
||||
|
||||
ARM_Dynarmic::~ARM_Dynarmic() = default;
|
||||
|
||||
@@ -264,7 +267,9 @@ void ARM_Dynarmic::PageTableChanged(Common::PageTable& page_table,
|
||||
jit = MakeJit(page_table, new_address_space_size_in_bits);
|
||||
}
|
||||
|
||||
DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(std::size_t core_count) : monitor(core_count) {}
|
||||
DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(Memory::Memory& memory_, std::size_t core_count)
|
||||
: monitor(core_count), memory{memory_} {}
|
||||
|
||||
DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;
|
||||
|
||||
void DynarmicExclusiveMonitor::SetExclusive(std::size_t core_index, VAddr addr) {
|
||||
@@ -277,29 +282,28 @@ void DynarmicExclusiveMonitor::ClearExclusive() {
|
||||
}
|
||||
|
||||
bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
|
||||
return monitor.DoExclusiveOperation(core_index, vaddr, 1,
|
||||
[&] { Memory::Write8(vaddr, value); });
|
||||
return monitor.DoExclusiveOperation(core_index, vaddr, 1, [&] { memory.Write8(vaddr, value); });
|
||||
}
|
||||
|
||||
bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
|
||||
return monitor.DoExclusiveOperation(core_index, vaddr, 2,
|
||||
[&] { Memory::Write16(vaddr, value); });
|
||||
[&] { memory.Write16(vaddr, value); });
|
||||
}
|
||||
|
||||
bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
|
||||
return monitor.DoExclusiveOperation(core_index, vaddr, 4,
|
||||
[&] { Memory::Write32(vaddr, value); });
|
||||
[&] { memory.Write32(vaddr, value); });
|
||||
}
|
||||
|
||||
bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
|
||||
return monitor.DoExclusiveOperation(core_index, vaddr, 8,
|
||||
[&] { Memory::Write64(vaddr, value); });
|
||||
[&] { memory.Write64(vaddr, value); });
|
||||
}
|
||||
|
||||
bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
|
||||
return monitor.DoExclusiveOperation(core_index, vaddr, 16, [&] {
|
||||
Memory::Write64(vaddr + 0, value[0]);
|
||||
Memory::Write64(vaddr + 8, value[1]);
|
||||
memory.Write64(vaddr + 0, value[0]);
|
||||
memory.Write64(vaddr + 8, value[1]);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,10 @@
|
||||
#include "core/arm/exclusive_monitor.h"
|
||||
#include "core/arm/unicorn/arm_unicorn.h"
|
||||
|
||||
namespace Memory {
|
||||
class Memory;
|
||||
}
|
||||
|
||||
namespace Core {
|
||||
|
||||
class ARM_Dynarmic_Callbacks;
|
||||
@@ -58,13 +62,12 @@ private:
|
||||
ARM_Unicorn inner_unicorn;
|
||||
|
||||
std::size_t core_index;
|
||||
System& system;
|
||||
DynarmicExclusiveMonitor& exclusive_monitor;
|
||||
};
|
||||
|
||||
class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
|
||||
public:
|
||||
explicit DynarmicExclusiveMonitor(std::size_t core_count);
|
||||
explicit DynarmicExclusiveMonitor(Memory::Memory& memory_, std::size_t core_count);
|
||||
~DynarmicExclusiveMonitor() override;
|
||||
|
||||
void SetExclusive(std::size_t core_index, VAddr addr) override;
|
||||
@@ -79,6 +82,7 @@ public:
|
||||
private:
|
||||
friend class ARM_Dynarmic;
|
||||
Dynarmic::A64::ExclusiveMonitor monitor;
|
||||
Memory::Memory& memory;
|
||||
};
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -60,17 +60,18 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
|
||||
return false;
|
||||
}
|
||||
|
||||
ARM_Unicorn::ARM_Unicorn(System& system) : system{system} {
|
||||
ARM_Unicorn::ARM_Unicorn(System& system) : ARM_Interface{system} {
|
||||
CHECKED(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc));
|
||||
|
||||
auto fpv = 3 << 20;
|
||||
CHECKED(uc_reg_write(uc, UC_ARM64_REG_CPACR_EL1, &fpv));
|
||||
|
||||
uc_hook hook{};
|
||||
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_INTR, (void*)InterruptHook, this, 0, -1));
|
||||
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_MEM_INVALID, (void*)UnmappedMemoryHook, &system, 0, -1));
|
||||
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_INTR, (void*)InterruptHook, this, 0, UINT64_MAX));
|
||||
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_MEM_INVALID, (void*)UnmappedMemoryHook, &system, 0,
|
||||
UINT64_MAX));
|
||||
if (GDBStub::IsServerEnabled()) {
|
||||
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_CODE, (void*)CodeHook, this, 0, -1));
|
||||
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_CODE, (void*)CodeHook, this, 0, UINT64_MAX));
|
||||
last_bkpt_hit = false;
|
||||
}
|
||||
}
|
||||
@@ -154,9 +155,10 @@ void ARM_Unicorn::SetTPIDR_EL0(u64 value) {
|
||||
|
||||
void ARM_Unicorn::Run() {
|
||||
if (GDBStub::IsServerEnabled()) {
|
||||
ExecuteInstructions(std::max(4000000, 0));
|
||||
ExecuteInstructions(std::max(4000000U, 0U));
|
||||
} else {
|
||||
ExecuteInstructions(std::max(system.CoreTiming().GetDowncount(), s64{0}));
|
||||
ExecuteInstructions(
|
||||
std::max(std::size_t(system.CoreTiming().GetDowncount()), std::size_t{0}));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -166,7 +168,7 @@ void ARM_Unicorn::Step() {
|
||||
|
||||
MICROPROFILE_DEFINE(ARM_Jit_Unicorn, "ARM JIT", "Unicorn", MP_RGB(255, 64, 64));
|
||||
|
||||
void ARM_Unicorn::ExecuteInstructions(int num_instructions) {
|
||||
void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) {
|
||||
MICROPROFILE_SCOPE(ARM_Jit_Unicorn);
|
||||
CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions));
|
||||
system.CoreTiming().AddTicks(num_instructions);
|
||||
|
||||
@@ -34,7 +34,7 @@ public:
|
||||
void LoadContext(const ThreadContext& ctx) override;
|
||||
void PrepareReschedule() override;
|
||||
void ClearExclusiveState() override;
|
||||
void ExecuteInstructions(int num_instructions);
|
||||
void ExecuteInstructions(std::size_t num_instructions);
|
||||
void Run() override;
|
||||
void Step() override;
|
||||
void ClearInstructionCache() override;
|
||||
@@ -45,7 +45,6 @@ private:
|
||||
static void InterruptHook(uc_engine* uc, u32 int_no, void* user_data);
|
||||
|
||||
uc_engine* uc{};
|
||||
System& system;
|
||||
GDBStub::BreakpointAddress last_bkpt{};
|
||||
bool last_bkpt_hit = false;
|
||||
};
|
||||
|
||||
@@ -39,6 +39,7 @@
|
||||
#include "core/hle/service/service.h"
|
||||
#include "core/hle/service/sm/sm.h"
|
||||
#include "core/loader/loader.h"
|
||||
#include "core/memory.h"
|
||||
#include "core/memory/cheat_engine.h"
|
||||
#include "core/perf_stats.h"
|
||||
#include "core/reporter.h"
|
||||
@@ -112,8 +113,8 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
|
||||
}
|
||||
struct System::Impl {
|
||||
explicit Impl(System& system)
|
||||
: kernel{system}, fs_controller{system}, cpu_core_manager{system}, reporter{system},
|
||||
applet_manager{system} {}
|
||||
: kernel{system}, fs_controller{system}, memory{system},
|
||||
cpu_core_manager{system}, reporter{system}, applet_manager{system} {}
|
||||
|
||||
Cpu& CurrentCpuCore() {
|
||||
return cpu_core_manager.GetCurrentCore();
|
||||
@@ -341,7 +342,8 @@ struct System::Impl {
|
||||
std::unique_ptr<VideoCore::RendererBase> renderer;
|
||||
std::unique_ptr<Tegra::GPU> gpu_core;
|
||||
std::shared_ptr<Tegra::DebugContext> debug_context;
|
||||
std::unique_ptr<Core::Hardware::InterruptManager> interrupt_manager;
|
||||
std::unique_ptr<Hardware::InterruptManager> interrupt_manager;
|
||||
Memory::Memory memory;
|
||||
CpuCoreManager cpu_core_manager;
|
||||
bool is_powered_on = false;
|
||||
bool exit_lock = false;
|
||||
@@ -498,6 +500,14 @@ const ExclusiveMonitor& System::Monitor() const {
return impl->cpu_core_manager.GetExclusiveMonitor();
}

Memory::Memory& System::Memory() {
return impl->memory;
}

const Memory::Memory& System::Memory() const {
return impl->memory;
}

Tegra::GPU& System::GPU() {
return *impl->gpu_core;
}
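With both overloads in place, call sites can take the memory instance from the System reference they already hold instead of calling the global Memory:: free functions, which is the migration applied throughout this change set. A hedged sketch of a typical call site (the helper function is illustrative, not part of the diff):

#include "common/common_types.h" // u32, VAddr
#include "core/core.h"           // Core::System
#include "core/memory.h"         // Memory::Memory

// Illustrative helper only: before this change the body would have been
//     return Memory::Read32(addr);
u32 ReadWord(Core::System& system, VAddr addr) {
    return system.Memory().Read32(addr);
}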
@@ -7,6 +7,7 @@
|
||||
#include <cstddef>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/file_sys/vfs_types.h"
|
||||
@@ -85,6 +86,10 @@ namespace Core::Hardware {
|
||||
class InterruptManager;
|
||||
}
|
||||
|
||||
namespace Memory {
|
||||
class Memory;
|
||||
}
|
||||
|
||||
namespace Core {
|
||||
|
||||
class ARM_Interface;
|
||||
@@ -224,6 +229,12 @@ public:
|
||||
/// Gets a constant reference to the exclusive monitor
|
||||
const ExclusiveMonitor& Monitor() const;
|
||||
|
||||
/// Gets a mutable reference to the system memory instance.
|
||||
Memory::Memory& Memory();
|
||||
|
||||
/// Gets a constant reference to the system memory instance.
|
||||
const Memory::Memory& Memory() const;
|
||||
|
||||
/// Gets a mutable reference to the GPU interface
|
||||
Tegra::GPU& GPU();
|
||||
|
||||
|
||||
@@ -66,9 +66,10 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba
|
||||
|
||||
Cpu::~Cpu() = default;
|
||||
|
||||
std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(std::size_t num_cores) {
|
||||
std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(
|
||||
[[maybe_unused]] Memory::Memory& memory, [[maybe_unused]] std::size_t num_cores) {
|
||||
#ifdef ARCHITECTURE_x86_64
|
||||
return std::make_unique<DynarmicExclusiveMonitor>(num_cores);
|
||||
return std::make_unique<DynarmicExclusiveMonitor>(memory, num_cores);
|
||||
#else
|
||||
// TODO(merry): Passthrough exclusive monitor
|
||||
return nullptr;
|
||||
@@ -95,6 +96,8 @@ void Cpu::RunLoop(bool tight_loop) {
|
||||
} else {
|
||||
arm_interface->Step();
|
||||
}
|
||||
// We are stopping a run, exclusive state must be cleared
|
||||
arm_interface->ClearExclusiveState();
|
||||
}
|
||||
core_timing.Advance();
|
||||
|
||||
|
||||
@@ -24,6 +24,10 @@ namespace Core::Timing {
|
||||
class CoreTiming;
|
||||
}
|
||||
|
||||
namespace Memory {
|
||||
class Memory;
|
||||
}
|
||||
|
||||
namespace Core {
|
||||
|
||||
class ARM_Interface;
|
||||
@@ -86,7 +90,19 @@ public:
|
||||
|
||||
void Shutdown();
|
||||
|
||||
static std::unique_ptr<ExclusiveMonitor> MakeExclusiveMonitor(std::size_t num_cores);
|
||||
/**
|
||||
* Creates an exclusive monitor to handle exclusive reads/writes.
|
||||
*
|
||||
* @param memory The current memory subsystem that the monitor may wish
|
||||
* to keep track of.
|
||||
*
|
||||
* @param num_cores The number of cores to assume about the CPU.
|
||||
*
|
||||
* @returns The constructed exclusive monitor instance, or nullptr if the current
|
||||
* CPU backend is unable to use an exclusive monitor.
|
||||
*/
|
||||
static std::unique_ptr<ExclusiveMonitor> MakeExclusiveMonitor(Memory::Memory& memory,
|
||||
std::size_t num_cores);
|
||||
|
||||
private:
|
||||
void Reschedule();
|
||||
|
||||
@@ -17,11 +17,15 @@ namespace Core::Timing {
|
||||
|
||||
constexpr int MAX_SLICE_LENGTH = 10000;
|
||||
|
||||
std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
|
||||
return std::make_shared<EventType>(std::move(callback), std::move(name));
|
||||
}
|
||||
|
||||
struct CoreTiming::Event {
|
||||
s64 time;
|
||||
u64 fifo_order;
|
||||
u64 userdata;
|
||||
const EventType* type;
|
||||
std::weak_ptr<EventType> type;
|
||||
|
||||
// Sort by time, unless the times are the same, in which case sort by
|
||||
// the order added to the queue
|
||||
@@ -54,36 +58,15 @@ void CoreTiming::Initialize() {
|
||||
event_fifo_id = 0;
|
||||
|
||||
const auto empty_timed_callback = [](u64, s64) {};
|
||||
ev_lost = RegisterEvent("_lost_event", empty_timed_callback);
|
||||
ev_lost = CreateEvent("_lost_event", empty_timed_callback);
|
||||
}
|
||||
|
||||
void CoreTiming::Shutdown() {
|
||||
ClearPendingEvents();
|
||||
UnregisterAllEvents();
|
||||
}
|
||||
|
||||
EventType* CoreTiming::RegisterEvent(const std::string& name, TimedCallback callback) {
|
||||
std::lock_guard guard{inner_mutex};
|
||||
// check for existing type with same name.
|
||||
// we want event type names to remain unique so that we can use them for serialization.
|
||||
ASSERT_MSG(event_types.find(name) == event_types.end(),
|
||||
"CoreTiming Event \"{}\" is already registered. Events should only be registered "
|
||||
"during Init to avoid breaking save states.",
|
||||
name.c_str());
|
||||
|
||||
auto info = event_types.emplace(name, EventType{callback, nullptr});
|
||||
EventType* event_type = &info.first->second;
|
||||
event_type->name = &info.first->first;
|
||||
return event_type;
|
||||
}
|
||||
|
||||
void CoreTiming::UnregisterAllEvents() {
|
||||
ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending");
|
||||
event_types.clear();
|
||||
}
|
||||
|
||||
void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
|
||||
ASSERT(event_type != nullptr);
|
||||
void CoreTiming::ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type,
|
||||
u64 userdata) {
|
||||
std::lock_guard guard{inner_mutex};
|
||||
const s64 timeout = GetTicks() + cycles_into_future;
|
||||
|
||||
@@ -93,13 +76,15 @@ void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_ty
|
||||
}
|
||||
|
||||
event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
|
||||
|
||||
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
}
|
||||
|
||||
void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
|
||||
void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
|
||||
std::lock_guard guard{inner_mutex};
|
||||
|
||||
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
|
||||
return e.type == event_type && e.userdata == userdata;
|
||||
return e.type.lock().get() == event_type.get() && e.userdata == userdata;
|
||||
});
|
||||
|
||||
// Removing random items breaks the invariant so we have to re-establish it.
|
||||
@@ -130,10 +115,12 @@ void CoreTiming::ClearPendingEvents() {
|
||||
event_queue.clear();
|
||||
}
|
||||
|
||||
void CoreTiming::RemoveEvent(const EventType* event_type) {
|
||||
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
|
||||
std::lock_guard guard{inner_mutex};
|
||||
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
|
||||
[&](const Event& e) { return e.type == event_type; });
|
||||
|
||||
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
|
||||
return e.type.lock().get() == event_type.get();
|
||||
});
|
||||
|
||||
// Removing random items breaks the invariant so we have to re-establish it.
|
||||
if (itr != event_queue.end()) {
|
||||
@@ -181,7 +168,11 @@ void CoreTiming::Advance() {
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
event_queue.pop_back();
inner_mutex.unlock();
evt.type->callback(evt.userdata, global_timer - evt.time);

if (auto event_type{evt.type.lock()}) {
event_type->callback(evt.userdata, global_timer - evt.time);
}

inner_mutex.lock();
}

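Because the queue entries now hold std::weak_ptr<EventType>, an event whose owner released its shared_ptr before dispatch simply fails to lock() and is skipped, rather than calling through a dangling pointer. A standalone sketch of that lifetime behaviour (names are invented, not the CoreTiming API):

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

struct Event {
    std::function<void()> callback;
};

int main() {
    std::vector<std::weak_ptr<Event>> queue;

    auto owned = std::make_shared<Event>(Event{[] { std::cout << "fired\n"; }});
    queue.push_back(owned);

    auto orphaned = std::make_shared<Event>(Event{[] { std::cout << "never fires\n"; }});
    queue.push_back(orphaned);
    orphaned.reset(); // the owner went away before the event was dispatched

    for (const auto& weak : queue) {
        if (auto event = weak.lock()) { // expired entries are skipped safely
            event->callback();
        }
    }
}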
@@ -6,11 +6,12 @@
|
||||
|
||||
#include <chrono>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/threadsafe_queue.h"
|
||||
|
||||
@@ -21,10 +22,13 @@ using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;
|
||||
|
||||
/// Contains the characteristics of a particular event.
|
||||
struct EventType {
|
||||
EventType(TimedCallback&& callback, std::string&& name)
|
||||
: callback{std::move(callback)}, name{std::move(name)} {}
|
||||
|
||||
/// The event's callback function.
|
||||
TimedCallback callback;
|
||||
/// A pointer to the name of the event.
|
||||
const std::string* name;
|
||||
const std::string name;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -57,31 +61,17 @@ public:
|
||||
/// Tears down all timing related functionality.
|
||||
void Shutdown();
|
||||
|
||||
/// Registers a core timing event with the given name and callback.
|
||||
///
|
||||
/// @param name The name of the core timing event to register.
|
||||
/// @param callback The callback to execute for the event.
|
||||
///
|
||||
/// @returns An EventType instance representing the registered event.
|
||||
///
|
||||
/// @pre The name of the event being registered must be unique among all
|
||||
/// registered events.
|
||||
///
|
||||
EventType* RegisterEvent(const std::string& name, TimedCallback callback);
|
||||
|
||||
/// Unregisters all registered events thus far. Note: not thread unsafe
|
||||
void UnregisterAllEvents();
|
||||
|
||||
/// After the first Advance, the slice lengths and the downcount will be reduced whenever an
|
||||
/// event is scheduled earlier than the current values.
|
||||
///
|
||||
/// Scheduling from a callback will not update the downcount until the Advance() completes.
|
||||
void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0);
|
||||
void ScheduleEvent(s64 cycles_into_future, const std::shared_ptr<EventType>& event_type,
|
||||
u64 userdata = 0);
|
||||
|
||||
void UnscheduleEvent(const EventType* event_type, u64 userdata);
|
||||
void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
|
||||
|
||||
/// We only permit one event of each type in the queue at a time.
|
||||
void RemoveEvent(const EventType* event_type);
|
||||
void RemoveEvent(const std::shared_ptr<EventType>& event_type);
|
||||
|
||||
void ForceExceptionCheck(s64 cycles);
|
||||
|
||||
@@ -148,13 +138,18 @@ private:
|
||||
std::vector<Event> event_queue;
|
||||
u64 event_fifo_id = 0;
|
||||
|
||||
// Stores each element separately as a linked list node so pointers to elements
|
||||
// remain stable regardless of rehashes/resizing.
|
||||
std::unordered_map<std::string, EventType> event_types;
|
||||
|
||||
EventType* ev_lost = nullptr;
|
||||
std::shared_ptr<EventType> ev_lost;
|
||||
|
||||
std::mutex inner_mutex;
|
||||
};
|
||||
|
||||
/// Creates a core timing event with the given name and callback.
|
||||
///
|
||||
/// @param name The name of the core timing event to create.
|
||||
/// @param callback The callback to execute for the event.
|
||||
///
|
||||
/// @returns An EventType instance representing the created event.
|
||||
///
|
||||
std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback);
|
||||
|
||||
} // namespace Core::Timing
|
||||
|
||||
@@ -25,7 +25,7 @@ CpuCoreManager::~CpuCoreManager() = default;
|
||||
|
||||
void CpuCoreManager::Initialize() {
|
||||
barrier = std::make_unique<CpuBarrier>();
|
||||
exclusive_monitor = Cpu::MakeExclusiveMonitor(cores.size());
|
||||
exclusive_monitor = Cpu::MakeExclusiveMonitor(system.Memory(), cores.size());
|
||||
|
||||
for (std::size_t index = 0; index < cores.size(); ++index) {
|
||||
cores[index] = std::make_unique<Cpu>(system, *exclusive_monitor, *barrier, index);
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "common/file_util.h"
|
||||
#include "common/hex_util.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/string_util.h"
|
||||
#include "core/core.h"
|
||||
#include "core/crypto/aes_util.h"
|
||||
#include "core/crypto/key_manager.h"
|
||||
@@ -378,8 +379,9 @@ std::vector<Ticket> GetTicketblob(const FileUtil::IOFile& ticket_save) {
template <size_t size>
static std::array<u8, size> operator^(const std::array<u8, size>& lhs,
const std::array<u8, size>& rhs) {
std::array<u8, size> out{};
std::transform(lhs.begin(), lhs.end(), rhs.begin(), out.begin(), std::bit_xor<>());
std::array<u8, size> out;
std::transform(lhs.begin(), lhs.end(), rhs.begin(), out.begin(),
[](u8 lhs, u8 rhs) { return u8(lhs ^ rhs); });
return out;
}
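The lambda keeps the XOR entirely in u8 and avoids the implicit int-to-u8 narrowing that std::bit_xor<> produces after integer promotion, which the MSVC warnings promoted to errors earlier in this change set would flag. A small standalone equivalent with a worked value (helper name is illustrative):

#include <algorithm>
#include <array>
#include <cstdint>

template <std::size_t N>
std::array<std::uint8_t, N> XorArrays(const std::array<std::uint8_t, N>& lhs,
                                      const std::array<std::uint8_t, N>& rhs) {
    std::array<std::uint8_t, N> out{};
    std::transform(lhs.begin(), lhs.end(), rhs.begin(), out.begin(),
                   [](std::uint8_t a, std::uint8_t b) { return std::uint8_t(a ^ b); });
    return out;
}

// XorArrays<4>({0xFF, 0x00, 0xAA, 0x55}, {0x0F, 0xF0, 0xAA, 0x55})
//   == {0xF0, 0xF0, 0x00, 0x00}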
@@ -396,7 +398,7 @@ static std::array<u8, target_size> MGF1(const std::array<u8, in_size>& seed) {
while (out.size() < target_size) {
out.resize(out.size() + 0x20);
seed_exp[in_size + 3] = static_cast<u8>(i);
mbedtls_sha256(seed_exp.data(), seed_exp.size(), out.data() + out.size() - 0x20, 0);
mbedtls_sha256_ret(seed_exp.data(), seed_exp.size(), out.data() + out.size() - 0x20, 0);
++i;
}
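In the mbedTLS 2.x line targeted here, mbedtls_sha256_ret() is the error-code-returning replacement for the deprecated void mbedtls_sha256(); the trailing 0 selects SHA-256 rather than SHA-224. A hedged sketch of checked usage (the helper name is invented):

#include <array>
#include <cstdint>
#include <cstdio>

#include <mbedtls/sha256.h>

// Sketch only: compute SHA-256 of a buffer and propagate failure instead of
// silently ignoring it as the void variant did.
bool Sha256Checked(const std::uint8_t* data, std::size_t size,
                   std::array<std::uint8_t, 0x20>& out) {
    const int ret = mbedtls_sha256_ret(data, size, out.data(), 0); // 0 => SHA-256, 1 => SHA-224
    if (ret != 0) {
        std::fprintf(stderr, "mbedtls_sha256_ret failed: %d\n", ret);
        return false;
    }
    return true;
}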
@@ -538,7 +540,7 @@ void KeyManager::LoadFromFile(const std::string& filename, bool is_title_keys) {
|
||||
Key128 key = Common::HexStringToArray<16>(out[1]);
|
||||
s128_keys[{S128KeyType::Titlekey, rights_id[1], rights_id[0]}] = key;
|
||||
} else {
|
||||
std::transform(out[0].begin(), out[0].end(), out[0].begin(), ::tolower);
|
||||
out[0] = Common::ToLower(out[0]);
|
||||
if (s128_file_id.find(out[0]) != s128_file_id.end()) {
|
||||
const auto index = s128_file_id.at(out[0]);
|
||||
Key128 key = Common::HexStringToArray<16>(out[1]);
|
||||
@@ -668,23 +670,27 @@ void KeyManager::WriteKeyToFile(KeyCategory category, std::string_view keyname,
|
||||
const std::array<u8, Size>& key) {
|
||||
const std::string yuzu_keys_dir = FileUtil::GetUserPath(FileUtil::UserPath::KeysDir);
|
||||
std::string filename = "title.keys_autogenerated";
|
||||
if (category == KeyCategory::Standard)
|
||||
if (category == KeyCategory::Standard) {
|
||||
filename = dev_mode ? "dev.keys_autogenerated" : "prod.keys_autogenerated";
|
||||
else if (category == KeyCategory::Console)
|
||||
} else if (category == KeyCategory::Console) {
|
||||
filename = "console.keys_autogenerated";
|
||||
const auto add_info_text = !FileUtil::Exists(yuzu_keys_dir + DIR_SEP + filename);
|
||||
FileUtil::CreateFullPath(yuzu_keys_dir + DIR_SEP + filename);
|
||||
std::ofstream file(yuzu_keys_dir + DIR_SEP + filename, std::ios::app);
|
||||
if (!file.is_open())
|
||||
return;
|
||||
if (add_info_text) {
|
||||
file
|
||||
<< "# This file is autogenerated by Yuzu\n"
|
||||
<< "# It serves to store keys that were automatically generated from the normal keys\n"
|
||||
<< "# If you are experiencing issues involving keys, it may help to delete this file\n";
|
||||
}
|
||||
|
||||
file << fmt::format("\n{} = {}", keyname, Common::HexToString(key));
|
||||
const auto path = yuzu_keys_dir + DIR_SEP + filename;
|
||||
const auto add_info_text = !FileUtil::Exists(path);
|
||||
FileUtil::CreateFullPath(path);
|
||||
FileUtil::IOFile file{path, "a"};
|
||||
if (!file.IsOpen()) {
|
||||
return;
|
||||
}
|
||||
if (add_info_text) {
|
||||
file.WriteString(
|
||||
"# This file is autogenerated by Yuzu\n"
|
||||
"# It serves to store keys that were automatically generated from the normal keys\n"
|
||||
"# If you are experiencing issues involving keys, it may help to delete this file\n");
|
||||
}
|
||||
|
||||
file.WriteString(fmt::format("\n{} = {}", keyname, Common::HexToString(key)));
|
||||
AttemptLoadKeyFile(yuzu_keys_dir, yuzu_keys_dir, filename, category == KeyCategory::Title);
|
||||
}
|
||||
|
||||
@@ -944,12 +950,10 @@ void KeyManager::DeriveETicket(PartitionDataManager& data) {
|
||||
return;
|
||||
}
|
||||
|
||||
Key128 rsa_oaep_kek{};
|
||||
std::transform(seed3.begin(), seed3.end(), mask0.begin(), rsa_oaep_kek.begin(),
|
||||
std::bit_xor<>());
|
||||
|
||||
if (rsa_oaep_kek == Key128{})
|
||||
const Key128 rsa_oaep_kek = seed3 ^ mask0;
|
||||
if (rsa_oaep_kek == Key128{}) {
|
||||
return;
|
||||
}
|
||||
|
||||
SetKey(S128KeyType::Source, rsa_oaep_kek,
|
||||
static_cast<u64>(SourceKeyType::RSAOaepKekGeneration));
|
||||
|
||||
@@ -161,7 +161,7 @@ std::array<u8, key_size> FindKeyFromHex(const std::vector<u8>& binary,
|
||||
|
||||
std::array<u8, 0x20> temp{};
|
||||
for (size_t i = 0; i < binary.size() - key_size; ++i) {
|
||||
mbedtls_sha256(binary.data() + i, key_size, temp.data(), 0);
|
||||
mbedtls_sha256_ret(binary.data() + i, key_size, temp.data(), 0);
|
||||
|
||||
if (temp != hash)
|
||||
continue;
|
||||
@@ -189,7 +189,7 @@ static std::array<Key128, 0x20> FindEncryptedMasterKeyFromHex(const std::vector<
|
||||
AESCipher<Key128> cipher(key, Mode::ECB);
|
||||
for (size_t i = 0; i < binary.size() - 0x10; ++i) {
|
||||
cipher.Transcode(binary.data() + i, dec_temp.size(), dec_temp.data(), Op::Decrypt);
|
||||
mbedtls_sha256(dec_temp.data(), dec_temp.size(), temp.data(), 0);
|
||||
mbedtls_sha256_ret(dec_temp.data(), dec_temp.size(), temp.data(), 0);
|
||||
|
||||
for (size_t k = 0; k < out.size(); ++k) {
|
||||
if (temp == master_key_hashes[k]) {
|
||||
@@ -204,11 +204,12 @@ static std::array<Key128, 0x20> FindEncryptedMasterKeyFromHex(const std::vector<
|
||||
|
||||
FileSys::VirtualFile FindFileInDirWithNames(const FileSys::VirtualDir& dir,
|
||||
const std::string& name) {
|
||||
auto upper = name;
|
||||
std::transform(upper.begin(), upper.end(), upper.begin(), [](u8 c) { return std::toupper(c); });
|
||||
const auto upper = Common::ToUpper(name);
|
||||
|
||||
for (const auto& fname : {name, name + ".bin", upper, upper + ".BIN"}) {
|
||||
if (dir->GetFile(fname) != nullptr)
|
||||
if (dir->GetFile(fname) != nullptr) {
|
||||
return dir->GetFile(fname);
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
|
||||
@@ -32,11 +32,28 @@ enum class NCASectionFilesystemType : u8 {
|
||||
ROMFS = 0x3,
|
||||
};
|
||||
|
||||
struct IVFCLevel {
|
||||
u64_le offset;
|
||||
u64_le size;
|
||||
u32_le block_size;
|
||||
u32_le reserved;
|
||||
};
|
||||
static_assert(sizeof(IVFCLevel) == 0x18, "IVFCLevel has incorrect size.");
|
||||
|
||||
struct IVFCHeader {
|
||||
u32_le magic;
|
||||
u32_le magic_number;
|
||||
INSERT_UNION_PADDING_BYTES(8);
|
||||
std::array<IVFCLevel, 6> levels;
|
||||
INSERT_UNION_PADDING_BYTES(64);
|
||||
};
|
||||
static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size.");
|
||||
|
||||
struct NCASectionHeaderBlock {
|
||||
INSERT_PADDING_BYTES(3);
|
||||
INSERT_UNION_PADDING_BYTES(3);
|
||||
NCASectionFilesystemType filesystem_type;
|
||||
NCASectionCryptoType crypto_type;
|
||||
INSERT_PADDING_BYTES(3);
|
||||
INSERT_UNION_PADDING_BYTES(3);
|
||||
};
|
||||
static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size.");
|
||||
|
||||
@@ -44,7 +61,7 @@ struct NCASectionRaw {
|
||||
NCASectionHeaderBlock header;
|
||||
std::array<u8, 0x138> block_data;
|
||||
std::array<u8, 0x8> section_ctr;
|
||||
INSERT_PADDING_BYTES(0xB8);
|
||||
INSERT_UNION_PADDING_BYTES(0xB8);
|
||||
};
|
||||
static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size.");
|
||||
|
||||
@@ -52,19 +69,19 @@ struct PFS0Superblock {
|
||||
NCASectionHeaderBlock header_block;
|
||||
std::array<u8, 0x20> hash;
|
||||
u32_le size;
|
||||
INSERT_PADDING_BYTES(4);
|
||||
INSERT_UNION_PADDING_BYTES(4);
|
||||
u64_le hash_table_offset;
|
||||
u64_le hash_table_size;
|
||||
u64_le pfs0_header_offset;
|
||||
u64_le pfs0_size;
|
||||
INSERT_PADDING_BYTES(0x1B0);
|
||||
INSERT_UNION_PADDING_BYTES(0x1B0);
|
||||
};
|
||||
static_assert(sizeof(PFS0Superblock) == 0x200, "PFS0Superblock has incorrect size.");
|
||||
|
||||
struct RomFSSuperblock {
|
||||
NCASectionHeaderBlock header_block;
|
||||
IVFCHeader ivfc;
|
||||
INSERT_PADDING_BYTES(0x118);
|
||||
INSERT_UNION_PADDING_BYTES(0x118);
|
||||
};
|
||||
static_assert(sizeof(RomFSSuperblock) == 0x200, "RomFSSuperblock has incorrect size.");
|
||||
|
||||
@@ -72,24 +89,24 @@ struct BKTRHeader {
|
||||
u64_le offset;
|
||||
u64_le size;
|
||||
u32_le magic;
|
||||
INSERT_PADDING_BYTES(0x4);
|
||||
INSERT_UNION_PADDING_BYTES(0x4);
|
||||
u32_le number_entries;
|
||||
INSERT_PADDING_BYTES(0x4);
|
||||
INSERT_UNION_PADDING_BYTES(0x4);
|
||||
};
|
||||
static_assert(sizeof(BKTRHeader) == 0x20, "BKTRHeader has incorrect size.");
|
||||
|
||||
struct BKTRSuperblock {
|
||||
NCASectionHeaderBlock header_block;
|
||||
IVFCHeader ivfc;
|
||||
INSERT_PADDING_BYTES(0x18);
|
||||
INSERT_UNION_PADDING_BYTES(0x18);
|
||||
BKTRHeader relocation;
|
||||
BKTRHeader subsection;
|
||||
INSERT_PADDING_BYTES(0xC0);
|
||||
INSERT_UNION_PADDING_BYTES(0xC0);
|
||||
};
|
||||
static_assert(sizeof(BKTRSuperblock) == 0x200, "BKTRSuperblock has incorrect size.");
|
||||
|
||||
union NCASectionHeader {
|
||||
NCASectionRaw raw;
|
||||
NCASectionRaw raw{};
|
||||
PFS0Superblock pfs0;
|
||||
RomFSSuperblock romfs;
|
||||
BKTRSuperblock bktr;
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
enum EntryType : u8 {
|
||||
enum class EntryType : u8 {
|
||||
Directory = 0,
|
||||
File = 1,
|
||||
};
|
||||
|
||||
@@ -147,7 +147,7 @@ std::vector<u32> KIP::GetKernelCapabilities() const {
|
||||
}
|
||||
|
||||
s32 KIP::GetMainThreadPriority() const {
|
||||
return header.main_thread_priority;
|
||||
return static_cast<s32>(header.main_thread_priority);
|
||||
}
|
||||
|
||||
u32 KIP::GetMainThreadStackSize() const {
|
||||
|
||||
@@ -76,7 +76,7 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
|
||||
|
||||
const auto& disabled = Settings::values.disabled_addons[title_id];
|
||||
const auto update_disabled =
|
||||
std::find(disabled.begin(), disabled.end(), "Update") != disabled.end();
|
||||
std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();
|
||||
|
||||
// Game Updates
|
||||
const auto update_tid = GetUpdateTitleID(title_id);
|
||||
@@ -127,7 +127,7 @@ std::vector<VirtualFile> PatchManager::CollectPatches(const std::vector<VirtualD
|
||||
std::vector<VirtualFile> out;
|
||||
out.reserve(patch_dirs.size());
|
||||
for (const auto& subdir : patch_dirs) {
|
||||
if (std::find(disabled.begin(), disabled.end(), subdir->GetName()) != disabled.end())
|
||||
if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend())
|
||||
continue;
|
||||
|
||||
auto exefs_dir = subdir->GetSubdirectory("exefs");
|
||||
@@ -284,12 +284,17 @@ std::vector<Memory::CheatEntry> PatchManager::CreateCheatList(
|
||||
return {};
|
||||
}
|
||||
|
||||
const auto& disabled = Settings::values.disabled_addons[title_id];
|
||||
auto patch_dirs = load_dir->GetSubdirectories();
|
||||
std::sort(patch_dirs.begin(), patch_dirs.end(),
|
||||
[](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); });
|
||||
|
||||
std::vector<Memory::CheatEntry> out;
|
||||
for (const auto& subdir : patch_dirs) {
|
||||
if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto cheats_dir = subdir->GetSubdirectory("cheats");
|
||||
if (cheats_dir != nullptr) {
|
||||
auto res = ReadCheatFileFromFolder(system, title_id, build_id_, cheats_dir, true);
|
||||
@@ -331,8 +336,9 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
|
||||
layers.reserve(patch_dirs.size() + 1);
|
||||
layers_ext.reserve(patch_dirs.size() + 1);
|
||||
for (const auto& subdir : patch_dirs) {
|
||||
if (std::find(disabled.begin(), disabled.end(), subdir->GetName()) != disabled.end())
|
||||
if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto romfs_dir = subdir->GetSubdirectory("romfs");
|
||||
if (romfs_dir != nullptr)
|
||||
@@ -381,7 +387,7 @@ VirtualFile PatchManager::PatchRomFS(VirtualFile romfs, u64 ivfc_offset, Content
|
||||
|
||||
const auto& disabled = Settings::values.disabled_addons[title_id];
|
||||
const auto update_disabled =
|
||||
std::find(disabled.begin(), disabled.end(), "Update") != disabled.end();
|
||||
std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();
|
||||
|
||||
if (!update_disabled && update != nullptr) {
|
||||
const auto new_nca = std::make_shared<NCA>(update, romfs, ivfc_offset);
|
||||
@@ -431,7 +437,7 @@ std::map<std::string, std::string, std::less<>> PatchManager::GetPatchVersionNam
|
||||
auto [nacp, discard_icon_file] = update.GetControlMetadata();
|
||||
|
||||
const auto update_disabled =
|
||||
std::find(disabled.begin(), disabled.end(), "Update") != disabled.end();
|
||||
std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();
|
||||
const auto update_label = update_disabled ? "[D] Update" : "Update";
|
||||
|
||||
if (nacp != nullptr) {
|
||||
|
||||
@@ -52,14 +52,14 @@ Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) {
|
||||
}
|
||||
|
||||
void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space,
|
||||
u8 main_thread_prio, u8 main_thread_core,
|
||||
s32 main_thread_prio, u32 main_thread_core,
|
||||
u32 main_thread_stack_size, u64 title_id,
|
||||
u64 filesystem_permissions,
|
||||
KernelCapabilityDescriptors capabilities) {
|
||||
npdm_header.has_64_bit_instructions.Assign(is_64_bit);
|
||||
npdm_header.address_space_type.Assign(address_space);
|
||||
npdm_header.main_thread_priority = main_thread_prio;
|
||||
npdm_header.main_thread_cpu = main_thread_core;
|
||||
npdm_header.main_thread_priority = static_cast<u8>(main_thread_prio);
|
||||
npdm_header.main_thread_cpu = static_cast<u8>(main_thread_core);
|
||||
npdm_header.main_stack_size = main_thread_stack_size;
|
||||
aci_header.title_id = title_id;
|
||||
aci_file_access.permissions = filesystem_permissions;
|
||||
|
||||
@@ -47,8 +47,8 @@ public:
|
||||
Loader::ResultStatus Load(VirtualFile file);
|
||||
|
||||
// Load from parameters instead of NPDM file, used for KIP
|
||||
void LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, u8 main_thread_prio,
|
||||
u8 main_thread_core, u32 main_thread_stack_size, u64 title_id,
|
||||
void LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, s32 main_thread_prio,
|
||||
u32 main_thread_core, u32 main_thread_stack_size, u64 title_id,
|
||||
u64 filesystem_permissions, KernelCapabilityDescriptors capabilities);
|
||||
|
||||
bool Is64BitProgram() const;
|
||||
|
||||
@@ -62,7 +62,7 @@ static std::string GetRelativePathFromNcaID(const std::array<u8, 16>& nca_id, bo
|
||||
Common::HexToString(nca_id, second_hex_upper));
|
||||
|
||||
Core::Crypto::SHA256Hash hash{};
|
||||
mbedtls_sha256(nca_id.data(), nca_id.size(), hash.data(), 0);
|
||||
mbedtls_sha256_ret(nca_id.data(), nca_id.size(), hash.data(), 0);
|
||||
return fmt::format(cnmt_suffix ? "/000000{:02X}/{}.cnmt.nca" : "/000000{:02X}/{}.nca", hash[0],
|
||||
Common::HexToString(nca_id, second_hex_upper));
|
||||
}
|
||||
@@ -141,7 +141,7 @@ bool PlaceholderCache::Create(const NcaID& id, u64 size) const {
|
||||
}
|
||||
|
||||
Core::Crypto::SHA256Hash hash{};
|
||||
mbedtls_sha256(id.data(), id.size(), hash.data(), 0);
|
||||
mbedtls_sha256_ret(id.data(), id.size(), hash.data(), 0);
|
||||
const auto dirname = fmt::format("000000{:02X}", hash[0]);
|
||||
|
||||
const auto dir2 = GetOrCreateDirectoryRelative(dir, dirname);
|
||||
@@ -165,7 +165,7 @@ bool PlaceholderCache::Delete(const NcaID& id) const {
|
||||
}
|
||||
|
||||
Core::Crypto::SHA256Hash hash{};
|
||||
mbedtls_sha256(id.data(), id.size(), hash.data(), 0);
|
||||
mbedtls_sha256_ret(id.data(), id.size(), hash.data(), 0);
|
||||
const auto dirname = fmt::format("000000{:02X}", hash[0]);
|
||||
|
||||
const auto dir2 = GetOrCreateDirectoryRelative(dir, dirname);
|
||||
@@ -603,7 +603,7 @@ InstallResult RegisteredCache::InstallEntry(const NCA& nca, TitleType type,
|
||||
OptionalHeader opt_header{0, 0};
|
||||
ContentRecord c_rec{{}, {}, {}, GetCRTypeFromNCAType(nca.GetType()), {}};
|
||||
const auto& data = nca.GetBaseFile()->ReadBytes(0x100000);
|
||||
mbedtls_sha256(data.data(), data.size(), c_rec.hash.data(), 0);
|
||||
mbedtls_sha256_ret(data.data(), data.size(), c_rec.hash.data(), 0);
|
||||
memcpy(&c_rec.nca_id, &c_rec.hash, 16);
|
||||
const CNMT new_cnmt(header, opt_header, {c_rec}, {});
|
||||
if (!RawInstallYuzuMeta(new_cnmt))
|
||||
@@ -626,7 +626,7 @@ InstallResult RegisteredCache::RawInstallNCA(const NCA& nca, const VfsCopyFuncti
|
||||
id = *override_id;
|
||||
} else {
|
||||
const auto& data = in->ReadBytes(0x100000);
|
||||
mbedtls_sha256(data.data(), data.size(), hash.data(), 0);
|
||||
mbedtls_sha256_ret(data.data(), data.size(), hash.data(), 0);
|
||||
memcpy(id.data(), hash.data(), 16);
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/file_sys/fsmitm_romfsbuild.h"
|
||||
@@ -12,7 +14,7 @@
|
||||
#include "core/file_sys/vfs_vector.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
namespace {
|
||||
constexpr u32 ROMFS_ENTRY_EMPTY = 0xFFFFFFFF;
|
||||
|
||||
struct TableLocation {
|
||||
@@ -51,7 +53,7 @@ struct FileEntry {
|
||||
static_assert(sizeof(FileEntry) == 0x20, "FileEntry has incorrect size.");
|
||||
|
||||
template <typename Entry>
|
||||
static std::pair<Entry, std::string> GetEntry(const VirtualFile& file, std::size_t offset) {
|
||||
std::pair<Entry, std::string> GetEntry(const VirtualFile& file, std::size_t offset) {
|
||||
Entry entry{};
|
||||
if (file->ReadObject(&entry, offset) != sizeof(Entry))
|
||||
return {};
|
||||
@@ -99,6 +101,7 @@ void ProcessDirectory(VirtualFile file, std::size_t dir_offset, std::size_t file
|
||||
this_dir_offset = entry.first.sibling;
|
||||
}
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) {
|
||||
RomFSHeader header{};
|
||||
|
||||
@@ -5,33 +5,10 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <map>
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/swap.h"
|
||||
#include "core/file_sys/vfs.h"
|
||||
|
||||
namespace FileSys {
|
||||
|
||||
struct RomFSHeader;
|
||||
|
||||
struct IVFCLevel {
|
||||
u64_le offset;
|
||||
u64_le size;
|
||||
u32_le block_size;
|
||||
u32_le reserved;
|
||||
};
|
||||
static_assert(sizeof(IVFCLevel) == 0x18, "IVFCLevel has incorrect size.");
|
||||
|
||||
struct IVFCHeader {
|
||||
u32_le magic;
|
||||
u32_le magic_number;
|
||||
INSERT_PADDING_BYTES(8);
|
||||
std::array<IVFCLevel, 6> levels;
|
||||
INSERT_PADDING_BYTES(64);
|
||||
};
|
||||
static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size.");
|
||||
|
||||
enum class RomFSExtractionType {
|
||||
Full, // Includes data directory
|
||||
Truncated, // Traverses into data directory
|
||||
|
||||
@@ -71,12 +71,12 @@ ResultVal<VirtualFile> RomFSFactory::Open(u64 title_id, StorageId storage,
|
||||
|
||||
if (res == nullptr) {
|
||||
// TODO(DarkLordZach): Find the right error code to use here
|
||||
return ResultCode(-1);
|
||||
return RESULT_UNKNOWN;
|
||||
}
|
||||
const auto romfs = res->GetRomFS();
|
||||
if (romfs == nullptr) {
|
||||
// TODO(DarkLordZach): Find the right error code to use here
|
||||
return ResultCode(-1);
|
||||
return RESULT_UNKNOWN;
|
||||
}
|
||||
return MakeResult<VirtualFile>(romfs);
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ namespace FileSys {
|
||||
constexpr char SAVE_DATA_SIZE_FILENAME[] = ".yuzu_save_size";
|
||||
|
||||
namespace {
|
||||
|
||||
void PrintSaveDataDescriptorWarnings(SaveDataDescriptor meta) {
|
||||
if (meta.type == SaveDataType::SystemSaveData || meta.type == SaveDataType::SaveData) {
|
||||
if (meta.zero_1 != 0) {
|
||||
@@ -52,6 +53,13 @@ void PrintSaveDataDescriptorWarnings(SaveDataDescriptor meta) {
|
||||
meta.user_id[1], meta.user_id[0]);
|
||||
}
|
||||
}
|
||||
|
||||
bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataDescriptor& desc) {
|
||||
return desc.type == SaveDataType::CacheStorage || desc.type == SaveDataType::TemporaryStorage ||
|
||||
(space == SaveDataSpaceId::NandUser && ///< Normal Save Data -- Current Title & User
|
||||
desc.type == SaveDataType::SaveData && desc.title_id == 0 && desc.save_id == 0);
|
||||
}
|
||||
|
||||
} // Anonymous namespace
|
||||
|
||||
std::string SaveDataDescriptor::DebugInfo() const {
|
||||
@@ -82,7 +90,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
|
||||
// Return an error if the save data doesn't actually exist.
|
||||
if (out == nullptr) {
|
||||
// TODO(DarkLordZach): Find out correct error code.
|
||||
return ResultCode(-1);
|
||||
return RESULT_UNKNOWN;
|
||||
}
|
||||
|
||||
return MakeResult<VirtualDir>(std::move(out));
|
||||
@@ -96,10 +104,14 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
|
||||
|
||||
auto out = dir->GetDirectoryRelative(save_directory);
|
||||
|
||||
if (out == nullptr && ShouldSaveDataBeAutomaticallyCreated(space, meta)) {
|
||||
return Create(space, meta);
|
||||
}
|
||||
|
||||
// Return an error if the save data doesn't actually exist.
|
||||
if (out == nullptr) {
|
||||
// TODO(Subv): Find out correct error code.
|
||||
return ResultCode(-1);
|
||||
return RESULT_UNKNOWN;
|
||||
}
|
||||
|
||||
return MakeResult<VirtualDir>(std::move(out));
|
||||
|
||||
@@ -27,7 +27,7 @@ VirtualDir ExtractZIP(VirtualFile file) {
|
||||
|
||||
std::shared_ptr<VectorVfsDirectory> out = std::make_shared<VectorVfsDirectory>();
|
||||
|
||||
const auto num_entries = zip_get_num_entries(zip.get(), 0);
|
||||
const auto num_entries = static_cast<std::size_t>(zip_get_num_entries(zip.get(), 0));
|
||||
|
||||
zip_stat_t stat{};
|
||||
zip_stat_init(&stat);
|
||||
|
||||
@@ -7,12 +7,13 @@
|
||||
#include <cstring>
|
||||
#include <regex>
|
||||
#include <string>
|
||||
|
||||
#include <mbedtls/md.h>
|
||||
#include <mbedtls/sha256.h>
|
||||
#include "common/assert.h"
|
||||
|
||||
#include "common/file_util.h"
|
||||
#include "common/hex_util.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/string_util.h"
|
||||
#include "core/crypto/aes_util.h"
|
||||
#include "core/crypto/xts_encryption_layer.h"
|
||||
#include "core/file_sys/partition_filesystem.h"
|
||||
@@ -53,18 +54,15 @@ NAX::NAX(VirtualFile file_) : header(std::make_unique<NAXHeader>()), file(std::m
|
||||
return;
|
||||
}
|
||||
|
||||
std::string two_dir = match[1];
|
||||
std::string nca_id = match[2];
|
||||
std::transform(two_dir.begin(), two_dir.end(), two_dir.begin(), ::toupper);
|
||||
std::transform(nca_id.begin(), nca_id.end(), nca_id.begin(), ::tolower);
|
||||
|
||||
const std::string two_dir = Common::ToUpper(match[1]);
|
||||
const std::string nca_id = Common::ToLower(match[2]);
|
||||
status = Parse(fmt::format("/registered/{}/{}.nca", two_dir, nca_id));
|
||||
}
|
||||
|
||||
NAX::NAX(VirtualFile file_, std::array<u8, 0x10> nca_id)
|
||||
: header(std::make_unique<NAXHeader>()), file(std::move(file_)) {
|
||||
Core::Crypto::SHA256Hash hash{};
|
||||
mbedtls_sha256(nca_id.data(), nca_id.size(), hash.data(), 0);
|
||||
mbedtls_sha256_ret(nca_id.data(), nca_id.size(), hash.data(), 0);
|
||||
status = Parse(fmt::format("/registered/000000{:02X}/{}.nca", hash[0],
|
||||
Common::HexToString(nca_id, false)));
|
||||
}
|
||||
@@ -93,8 +91,7 @@ Loader::ResultStatus NAX::Parse(std::string_view path) {
|
||||
std::size_t i = 0;
|
||||
for (; i < sd_keys.size(); ++i) {
|
||||
std::array<Core::Crypto::Key128, 2> nax_keys{};
|
||||
if (!CalculateHMAC256(nax_keys.data(), sd_keys[i].data(), 0x10, std::string(path).c_str(),
|
||||
path.size())) {
|
||||
if (!CalculateHMAC256(nax_keys.data(), sd_keys[i].data(), 0x10, path.data(), path.size())) {
|
||||
return Loader::ResultStatus::ErrorNAXKeyHMACFailed;
|
||||
}
|
||||
|
||||
|
||||
@@ -468,7 +468,8 @@ static u8 ReadByte() {

/// Calculate the checksum of the current command buffer.
static u8 CalculateChecksum(const u8* buffer, std::size_t length) {
return static_cast<u8>(std::accumulate(buffer, buffer + length, 0, std::plus<u8>()));
return static_cast<u8>(std::accumulate(buffer, buffer + length, u8{0},
[](u8 lhs, u8 rhs) { return u8(lhs + rhs); }));
}
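The result is unchanged: the GDB remote-protocol checksum is the modulo-256 sum of the payload bytes. Seeding the accumulate with u8{0} and summing through a u8 lambda simply keeps the arithmetic free of the implicit int-to-u8 narrowing that the stricter warnings flag, and makes the intended wrap-around explicit. A standalone sketch with a worked value (helper name invented):

#include <cstdint>
#include <numeric>

using u8 = std::uint8_t;

// GDB remote-protocol checksum: sum of all payload bytes modulo 256.
u8 PacketChecksum(const u8* buffer, std::size_t length) {
    return std::accumulate(buffer, buffer + length, u8{0},
                           [](u8 lhs, u8 rhs) { return u8(lhs + rhs); });
}

// Example: the payload "OK" checksums to 0x4F + 0x4B = 0x9A.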
/**
|
||||
@@ -507,8 +508,9 @@ static void RemoveBreakpoint(BreakpointType type, VAddr addr) {
|
||||
bp->second.len, bp->second.addr, static_cast<int>(type));
|
||||
|
||||
if (type == BreakpointType::Execute) {
|
||||
Memory::WriteBlock(bp->second.addr, bp->second.inst.data(), bp->second.inst.size());
|
||||
Core::System::GetInstance().InvalidateCpuInstructionCaches();
|
||||
auto& system = Core::System::GetInstance();
|
||||
system.Memory().WriteBlock(bp->second.addr, bp->second.inst.data(), bp->second.inst.size());
|
||||
system.InvalidateCpuInstructionCaches();
|
||||
}
|
||||
p.erase(addr);
|
||||
}
|
||||
@@ -968,12 +970,13 @@ static void ReadMemory() {
|
||||
SendReply("E01");
|
||||
}
|
||||
|
||||
if (!Memory::IsValidVirtualAddress(addr)) {
|
||||
auto& memory = Core::System::GetInstance().Memory();
|
||||
if (!memory.IsValidVirtualAddress(addr)) {
|
||||
return SendReply("E00");
|
||||
}
|
||||
|
||||
std::vector<u8> data(len);
|
||||
Memory::ReadBlock(addr, data.data(), len);
|
||||
memory.ReadBlock(addr, data.data(), len);
|
||||
|
||||
MemToGdbHex(reply, data.data(), len);
|
||||
reply[len * 2] = '\0';
|
||||
@@ -983,22 +986,23 @@ static void ReadMemory() {
|
||||
/// Modify location in memory with data received from the gdb client.
|
||||
static void WriteMemory() {
|
||||
auto start_offset = command_buffer + 1;
|
||||
auto addr_pos = std::find(start_offset, command_buffer + command_length, ',');
|
||||
VAddr addr = HexToLong(start_offset, static_cast<u64>(addr_pos - start_offset));
|
||||
const auto addr_pos = std::find(start_offset, command_buffer + command_length, ',');
|
||||
const VAddr addr = HexToLong(start_offset, static_cast<u64>(addr_pos - start_offset));
|
||||
|
||||
start_offset = addr_pos + 1;
|
||||
auto len_pos = std::find(start_offset, command_buffer + command_length, ':');
|
||||
u64 len = HexToLong(start_offset, static_cast<u64>(len_pos - start_offset));
|
||||
const auto len_pos = std::find(start_offset, command_buffer + command_length, ':');
|
||||
const u64 len = HexToLong(start_offset, static_cast<u64>(len_pos - start_offset));
|
||||
|
||||
if (!Memory::IsValidVirtualAddress(addr)) {
|
||||
auto& system = Core::System::GetInstance();
|
||||
auto& memory = system.Memory();
|
||||
if (!memory.IsValidVirtualAddress(addr)) {
|
||||
return SendReply("E00");
|
||||
}
|
||||
|
||||
std::vector<u8> data(len);
|
||||
|
||||
GdbHexToMem(data.data(), len_pos + 1, len);
|
||||
Memory::WriteBlock(addr, data.data(), len);
|
||||
Core::System::GetInstance().InvalidateCpuInstructionCaches();
|
||||
memory.WriteBlock(addr, data.data(), len);
|
||||
system.InvalidateCpuInstructionCaches();
|
||||
SendReply("OK");
|
||||
}
|
||||
|
||||
@@ -1054,12 +1058,15 @@ static bool CommitBreakpoint(BreakpointType type, VAddr addr, u64 len) {
|
||||
breakpoint.active = true;
|
||||
breakpoint.addr = addr;
|
||||
breakpoint.len = len;
|
||||
Memory::ReadBlock(addr, breakpoint.inst.data(), breakpoint.inst.size());
|
||||
|
||||
auto& system = Core::System::GetInstance();
|
||||
auto& memory = system.Memory();
|
||||
memory.ReadBlock(addr, breakpoint.inst.data(), breakpoint.inst.size());
|
||||
|
||||
static constexpr std::array<u8, 4> btrap{0x00, 0x7d, 0x20, 0xd4};
|
||||
if (type == BreakpointType::Execute) {
|
||||
Memory::WriteBlock(addr, btrap.data(), btrap.size());
|
||||
Core::System::GetInstance().InvalidateCpuInstructionCaches();
|
||||
memory.WriteBlock(addr, btrap.data(), btrap.size());
|
||||
system.InvalidateCpuInstructionCaches();
|
||||
}
|
||||
p.insert({addr, breakpoint});
|
||||
|
||||
|
||||
@@ -11,13 +11,12 @@
|
||||
namespace Core::Hardware {
|
||||
|
||||
InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
|
||||
gpu_interrupt_event =
|
||||
system.CoreTiming().RegisterEvent("GPUInterrupt", [this](u64 message, s64) {
|
||||
auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
|
||||
const u32 syncpt = static_cast<u32>(message >> 32);
|
||||
const u32 value = static_cast<u32>(message);
|
||||
nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
|
||||
});
|
||||
gpu_interrupt_event = Core::Timing::CreateEvent("GPUInterrupt", [this](u64 message, s64) {
|
||||
auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
|
||||
const u32 syncpt = static_cast<u32>(message >> 32);
|
||||
const u32 value = static_cast<u32>(message);
|
||||
nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
|
||||
});
|
||||
}
|
||||
|
||||
InterruptManager::~InterruptManager() = default;
|
||||
|
||||
@@ -4,6 +4,8 @@

#pragma once

#include <memory>

#include "common/common_types.h"

namespace Core {

@@ -25,7 +27,7 @@ public:

private:
Core::System& system;
Core::Timing::EventType* gpu_interrupt_event{};
std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event;
};

} // namespace Core::Hardware
@@ -160,7 +160,7 @@ struct DomainMessageHeader {
// Used when responding to an IPC request, Server -> Client.
struct {
u32_le num_objects;
INSERT_PADDING_WORDS(3);
INSERT_UNION_PADDING_WORDS(3);
};

// Used when performing an IPC request, Client -> Server.

@@ -171,8 +171,10 @@ struct DomainMessageHeader {
BitField<16, 16, u32> size;
};
u32_le object_id;
INSERT_PADDING_WORDS(2);
INSERT_UNION_PADDING_WORDS(2);
};

std::array<u32, 4> raw{};
};
};
static_assert(sizeof(DomainMessageHeader) == 16, "DomainMessageHeader size is incorrect");
@@ -19,6 +19,7 @@
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/result.h"

namespace IPC {

@@ -139,10 +140,9 @@ public:
context->AddDomainObject(std::move(iface));
} else {
auto& kernel = Core::System::GetInstance().Kernel();
auto [server, client] =
Kernel::ServerSession::CreateSessionPair(kernel, iface->GetServiceName());
iface->ClientConnected(server);
auto [client, server] = Kernel::Session::Create(kernel, iface->GetServiceName());
context->AddMoveObject(std::move(client));
iface->ClientConnected(std::move(server));
}
}
@@ -203,10 +203,10 @@ public:
void PushRaw(const T& value);

template <typename... O>
void PushMoveObjects(Kernel::SharedPtr<O>... pointers);
void PushMoveObjects(std::shared_ptr<O>... pointers);

template <typename... O>
void PushCopyObjects(Kernel::SharedPtr<O>... pointers);
void PushCopyObjects(std::shared_ptr<O>... pointers);

private:
u32 normal_params_size{};

@@ -298,7 +298,7 @@ void ResponseBuilder::Push(const First& first_value, const Other&... other_value
}

template <typename... O>
inline void ResponseBuilder::PushCopyObjects(Kernel::SharedPtr<O>... pointers) {
inline void ResponseBuilder::PushCopyObjects(std::shared_ptr<O>... pointers) {
auto objects = {pointers...};
for (auto& object : objects) {
context->AddCopyObject(std::move(object));

@@ -306,7 +306,7 @@ inline void ResponseBuilder::PushCopyObjects(Kernel::SharedPtr<O>... pointers) {
}

template <typename... O>
inline void ResponseBuilder::PushMoveObjects(Kernel::SharedPtr<O>... pointers) {
inline void ResponseBuilder::PushMoveObjects(std::shared_ptr<O>... pointers) {
auto objects = {pointers...};
for (auto& object : objects) {
context->AddMoveObject(std::move(object));

@@ -357,10 +357,10 @@ public:
T PopRaw();

template <typename T>
Kernel::SharedPtr<T> GetMoveObject(std::size_t index);
std::shared_ptr<T> GetMoveObject(std::size_t index);

template <typename T>
Kernel::SharedPtr<T> GetCopyObject(std::size_t index);
std::shared_ptr<T> GetCopyObject(std::size_t index);

template <class T>
std::shared_ptr<T> PopIpcInterface() {

@@ -465,12 +465,12 @@ void RequestParser::Pop(First& first_value, Other&... other_values) {
}

template <typename T>
Kernel::SharedPtr<T> RequestParser::GetMoveObject(std::size_t index) {
std::shared_ptr<T> RequestParser::GetMoveObject(std::size_t index) {
return context->GetMoveObject<T>(index);
}

template <typename T>
Kernel::SharedPtr<T> RequestParser::GetCopyObject(std::size_t index) {
std::shared_ptr<T> RequestParser::GetCopyObject(std::size_t index) {
return context->GetCopyObject<T>(index);
}
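These helpers are what HLE service handlers use to move kernel objects across IPC; only the smart-pointer type changes, not the call pattern. A hypothetical handler, with made-up service and member names:

    // Sketch only: returning a copy handle to the guest after the std::shared_ptr migration.
    void ISomeService::GetEvent(Kernel::HLERequestContext& ctx) {
        IPC::ResponseBuilder rb{ctx, 2, 1}; // two result words, one copy handle
        rb.Push(RESULT_SUCCESS);
        rb.PushCopyObjects(event); // 'event' is assumed to be a std::shared_ptr to a kernel object
    }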
@@ -11,18 +11,16 @@
|
||||
#include "core/core_cpu.h"
|
||||
#include "core/hle/kernel/address_arbiter.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/scheduler.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/result.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Kernel {
|
||||
namespace {
|
||||
|
||||
// Wake up num_to_wake (or all) threads in a vector.
|
||||
void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
|
||||
auto& system = Core::System::GetInstance();
|
||||
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
|
||||
s32 num_to_wake) {
|
||||
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
|
||||
// them all.
|
||||
std::size_t last = waiting_threads.size();
|
||||
@@ -34,12 +32,12 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
|
||||
for (std::size_t i = 0; i < last; i++) {
|
||||
ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb);
|
||||
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
|
||||
RemoveThread(waiting_threads[i]);
|
||||
waiting_threads[i]->SetArbiterWaitAddress(0);
|
||||
waiting_threads[i]->ResumeFromWait();
|
||||
system.PrepareReschedule(waiting_threads[i]->GetProcessorID());
|
||||
}
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
|
||||
AddressArbiter::~AddressArbiter() = default;
|
||||
@@ -59,35 +57,41 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
|
||||
const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address);
|
||||
const std::vector<std::shared_ptr<Thread>> waiting_threads =
|
||||
GetThreadsWaitingOnAddress(address);
|
||||
WakeThreads(waiting_threads, num_to_wake);
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
|
||||
s32 num_to_wake) {
|
||||
auto& memory = system.Memory();
|
||||
|
||||
// Ensure that we can write to the address.
|
||||
if (!Memory::IsValidVirtualAddress(address)) {
|
||||
if (!memory.IsValidVirtualAddress(address)) {
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
if (static_cast<s32>(Memory::Read32(address)) != value) {
|
||||
if (static_cast<s32>(memory.Read32(address)) != value) {
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
|
||||
Memory::Write32(address, static_cast<u32>(value + 1));
|
||||
memory.Write32(address, static_cast<u32>(value + 1));
|
||||
return SignalToAddressOnly(address, num_to_wake);
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
|
||||
s32 num_to_wake) {
|
||||
auto& memory = system.Memory();
|
||||
|
||||
// Ensure that we can write to the address.
|
||||
if (!Memory::IsValidVirtualAddress(address)) {
|
||||
if (!memory.IsValidVirtualAddress(address)) {
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
// Get threads waiting on the address.
|
||||
const std::vector<SharedPtr<Thread>> waiting_threads = GetThreadsWaitingOnAddress(address);
|
||||
const std::vector<std::shared_ptr<Thread>> waiting_threads =
|
||||
GetThreadsWaitingOnAddress(address);
|
||||
|
||||
// Determine the modified value depending on the waiting count.
|
||||
s32 updated_value;
|
||||
@@ -107,11 +111,11 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
|
||||
}
|
||||
}
|
||||
|
||||
if (static_cast<s32>(Memory::Read32(address)) != value) {
|
||||
if (static_cast<s32>(memory.Read32(address)) != value) {
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
|
||||
Memory::Write32(address, static_cast<u32>(updated_value));
|
||||
memory.Write32(address, static_cast<u32>(updated_value));
|
||||
WakeThreads(waiting_threads, num_to_wake);
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
@@ -132,18 +136,20 @@ ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s
|
||||
|
||||
ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
|
||||
bool should_decrement) {
|
||||
auto& memory = system.Memory();
|
||||
|
||||
// Ensure that we can read the address.
|
||||
if (!Memory::IsValidVirtualAddress(address)) {
|
||||
if (!memory.IsValidVirtualAddress(address)) {
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
const s32 cur_value = static_cast<s32>(Memory::Read32(address));
|
||||
const s32 cur_value = static_cast<s32>(memory.Read32(address));
|
||||
if (cur_value >= value) {
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
|
||||
if (should_decrement) {
|
||||
Memory::Write32(address, static_cast<u32>(cur_value - 1));
|
||||
memory.Write32(address, static_cast<u32>(cur_value - 1));
|
||||
}
|
||||
|
||||
// Short-circuit without rescheduling, if timeout is zero.
|
||||
@@ -155,15 +161,19 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
|
||||
auto& memory = system.Memory();
|
||||
|
||||
// Ensure that we can read the address.
|
||||
if (!Memory::IsValidVirtualAddress(address)) {
|
||||
if (!memory.IsValidVirtualAddress(address)) {
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
// Only wait for the address if equal.
|
||||
if (static_cast<s32>(Memory::Read32(address)) != value) {
|
||||
if (static_cast<s32>(memory.Read32(address)) != value) {
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
// Short-circuit without rescheduling, if timeout is zero.
|
||||
|
||||
// Short-circuit without rescheduling if timeout is zero.
|
||||
if (timeout == 0) {
|
||||
return RESULT_TIMEOUT;
|
||||
}
|
||||
@@ -172,36 +182,62 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) {
|
||||
SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread();
|
||||
Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
|
||||
current_thread->SetArbiterWaitAddress(address);
|
||||
InsertThread(SharedFrom(current_thread));
|
||||
current_thread->SetStatus(ThreadStatus::WaitArb);
|
||||
current_thread->InvalidateWakeupCallback();
|
||||
|
||||
current_thread->WakeAfterDelay(timeout);
|
||||
|
||||
system.PrepareReschedule(current_thread->GetProcessorID());
|
||||
return RESULT_TIMEOUT;
|
||||
}
|
||||
|
||||
std::vector<SharedPtr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) const {
|
||||
void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
|
||||
RemoveThread(thread);
|
||||
thread->SetArbiterWaitAddress(0);
|
||||
}
|
||||
|
||||
// Retrieve all threads that are waiting for this address.
|
||||
std::vector<SharedPtr<Thread>> threads;
|
||||
const auto& scheduler = system.GlobalScheduler();
|
||||
const auto& thread_list = scheduler.GetThreadList();
|
||||
|
||||
for (const auto& thread : thread_list) {
|
||||
if (thread->GetArbiterWaitAddress() == address) {
|
||||
threads.push_back(thread);
|
||||
void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
|
||||
const VAddr arb_addr = thread->GetArbiterWaitAddress();
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
const std::shared_ptr<Thread>& current_thread = *it;
|
||||
if (current_thread->GetPriority() >= thread->GetPriority()) {
|
||||
thread_list.insert(it, thread);
|
||||
return;
|
||||
}
|
||||
++it;
|
||||
}
|
||||
thread_list.push_back(std::move(thread));
|
||||
}
|
||||
|
||||
// Sort them by priority, such that the highest priority ones come first.
|
||||
std::sort(threads.begin(), threads.end(),
|
||||
[](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
|
||||
return lhs->GetPriority() < rhs->GetPriority();
|
||||
});
|
||||
void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
|
||||
const VAddr arb_addr = thread->GetArbiterWaitAddress();
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
const std::shared_ptr<Thread>& current_thread = *it;
|
||||
if (current_thread.get() == thread.get()) {
|
||||
thread_list.erase(it);
|
||||
return;
|
||||
}
|
||||
++it;
|
||||
}
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
return threads;
|
||||
std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) {
|
||||
std::vector<std::shared_ptr<Thread>> result;
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[address];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
std::shared_ptr<Thread> current_thread = *it;
|
||||
result.push_back(std::move(current_thread));
|
||||
++it;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -4,10 +4,12 @@

#pragma once

#include <list>
#include <memory>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"
#include "core/hle/kernel/object.h"

union ResultCode;

@@ -48,6 +50,9 @@ public:
/// Waits on an address with a particular arbitration type.
ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);

/// Removes a thread from the container and resets its address arbiter address to 0
void HandleWakeupThread(std::shared_ptr<Thread> thread);

private:
/// Signals an address being waited on.
ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);

@@ -71,8 +76,20 @@ private:
// Waits on the given address with a timeout in nanoseconds
ResultCode WaitForAddressImpl(VAddr address, s64 timeout);

/// Wake up num_to_wake (or all) threads in a vector.
void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);

/// Insert a thread into the address arbiter container
void InsertThread(std::shared_ptr<Thread> thread);

/// Removes a thread from the address arbiter container
void RemoveThread(std::shared_ptr<Thread> thread);

// Gets the threads waiting on an address.
std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address);

/// List of threads waiting for an address arbiter
std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;

Core::System& system;
};
@@ -8,39 +8,35 @@
|
||||
#include "core/hle/kernel/hle_ipc.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/server_port.h"
|
||||
#include "core/hle/kernel/server_session.h"
|
||||
#include "core/hle/kernel/session.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
ClientPort::ClientPort(KernelCore& kernel) : Object{kernel} {}
|
||||
ClientPort::~ClientPort() = default;
|
||||
|
||||
SharedPtr<ServerPort> ClientPort::GetServerPort() const {
|
||||
std::shared_ptr<ServerPort> ClientPort::GetServerPort() const {
|
||||
return server_port;
|
||||
}
|
||||
|
||||
ResultVal<SharedPtr<ClientSession>> ClientPort::Connect() {
|
||||
// Note: Threads do not wait for the server endpoint to call
|
||||
// AcceptSession before returning from this call.
|
||||
|
||||
ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
|
||||
if (active_sessions >= max_sessions) {
|
||||
return ERR_MAX_CONNECTIONS_REACHED;
|
||||
}
|
||||
active_sessions++;
|
||||
|
||||
// Create a new session pair, let the created sessions inherit the parent port's HLE handler.
|
||||
auto [server, client] = ServerSession::CreateSessionPair(kernel, server_port->GetName(), this);
|
||||
auto [client, server] = Kernel::Session::Create(kernel, name);
|
||||
|
||||
if (server_port->HasHLEHandler()) {
|
||||
server_port->GetHLEHandler()->ClientConnected(server);
|
||||
server_port->GetHLEHandler()->ClientConnected(std::move(server));
|
||||
} else {
|
||||
server_port->AppendPendingSession(server);
|
||||
server_port->AppendPendingSession(std::move(server));
|
||||
}
|
||||
|
||||
// Wake the threads waiting on the ServerPort
|
||||
server_port->WakeupAllWaitingThreads();
|
||||
|
||||
return MakeResult(client);
|
||||
return MakeResult(std::move(client));
|
||||
}
|
||||
|
||||
void ClientPort::ConnectionClosed() {
|
||||
|
||||
@@ -4,7 +4,9 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/result.h"
|
||||
@@ -17,6 +19,9 @@ class ServerPort;
|
||||
|
||||
class ClientPort final : public Object {
|
||||
public:
|
||||
explicit ClientPort(KernelCore& kernel);
|
||||
~ClientPort() override;
|
||||
|
||||
friend class ServerPort;
|
||||
std::string GetTypeName() const override {
|
||||
return "ClientPort";
|
||||
@@ -30,7 +35,7 @@ public:
|
||||
return HANDLE_TYPE;
|
||||
}
|
||||
|
||||
SharedPtr<ServerPort> GetServerPort() const;
|
||||
std::shared_ptr<ServerPort> GetServerPort() const;
|
||||
|
||||
/**
|
||||
* Creates a new Session pair, adds the created ServerSession to the associated ServerPort's
|
||||
@@ -38,7 +43,7 @@ public:
|
||||
* waiting on it to awake.
|
||||
* @returns ClientSession The client endpoint of the created Session pair, or error code.
|
||||
*/
|
||||
ResultVal<SharedPtr<ClientSession>> Connect();
|
||||
ResultVal<std::shared_ptr<ClientSession>> Connect();
|
||||
|
||||
/**
|
||||
* Signifies that a previously active connection has been closed,
|
||||
@@ -47,10 +52,7 @@ public:
|
||||
void ConnectionClosed();
|
||||
|
||||
private:
|
||||
explicit ClientPort(KernelCore& kernel);
|
||||
~ClientPort() override;
|
||||
|
||||
SharedPtr<ServerPort> server_port; ///< ServerPort associated with this client port.
|
||||
std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
|
||||
u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
|
||||
u32 active_sessions = 0; ///< Number of currently open sessions to this port
|
||||
std::string name; ///< Name of client port (optional)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2016 Citra Emulator Project
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
@@ -12,29 +12,44 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
ClientSession::ClientSession(KernelCore& kernel) : Object{kernel} {}
|
||||
ClientSession::ClientSession(KernelCore& kernel) : WaitObject{kernel} {}
|
||||
|
||||
ClientSession::~ClientSession() {
|
||||
// This destructor will be called automatically when the last ClientSession handle is closed by
|
||||
// the emulated application.
|
||||
|
||||
// A local reference to the ServerSession is necessary to guarantee it
|
||||
// will be kept alive until after ClientDisconnected() returns.
|
||||
SharedPtr<ServerSession> server = parent->server;
|
||||
if (server) {
|
||||
server->ClientDisconnected();
|
||||
if (parent->Server()) {
|
||||
parent->Server()->ClientDisconnected();
|
||||
}
|
||||
|
||||
parent->client = nullptr;
|
||||
}
|
||||
|
||||
ResultCode ClientSession::SendSyncRequest(SharedPtr<Thread> thread) {
|
||||
bool ClientSession::ShouldWait(const Thread* thread) const {
|
||||
UNIMPLEMENTED();
|
||||
return {};
|
||||
}
|
||||
|
||||
void ClientSession::Acquire(Thread* thread) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kernel,
|
||||
std::shared_ptr<Session> parent,
|
||||
std::string name) {
|
||||
std::shared_ptr<ClientSession> client_session{std::make_shared<ClientSession>(kernel)};
|
||||
|
||||
client_session->name = std::move(name);
|
||||
client_session->parent = std::move(parent);
|
||||
|
||||
return MakeResult(std::move(client_session));
|
||||
}
|
||||
|
||||
ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) {
|
||||
// Keep ServerSession alive until we're done working with it.
|
||||
SharedPtr<ServerSession> server = parent->server;
|
||||
if (server == nullptr)
|
||||
if (!parent->Server()) {
|
||||
return ERR_SESSION_CLOSED_BY_REMOTE;
|
||||
}
|
||||
|
||||
// Signal the server session that new data is available
|
||||
return server->HandleSyncRequest(std::move(thread));
|
||||
return parent->Server()->HandleSyncRequest(std::move(thread), memory);
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2016 Citra Emulator Project
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
@@ -6,20 +6,28 @@
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include "core/hle/kernel/object.h"
|
||||
|
||||
#include "core/hle/kernel/wait_object.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
union ResultCode;
|
||||
|
||||
namespace Memory {
|
||||
class Memory;
|
||||
}
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KernelCore;
|
||||
class Session;
|
||||
class ServerSession;
|
||||
class Thread;
|
||||
|
||||
class ClientSession final : public Object {
|
||||
class ClientSession final : public WaitObject {
|
||||
public:
|
||||
friend class ServerSession;
|
||||
explicit ClientSession(KernelCore& kernel);
|
||||
~ClientSession() override;
|
||||
|
||||
friend class Session;
|
||||
|
||||
std::string GetTypeName() const override {
|
||||
return "ClientSession";
|
||||
@@ -34,11 +42,16 @@ public:
|
||||
return HANDLE_TYPE;
|
||||
}
|
||||
|
||||
ResultCode SendSyncRequest(SharedPtr<Thread> thread);
|
||||
ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory);
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
private:
|
||||
explicit ClientSession(KernelCore& kernel);
|
||||
~ClientSession() override;
|
||||
static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
|
||||
std::shared_ptr<Session> parent,
|
||||
std::string name = "Unknown");
|
||||
|
||||
/// The parent session, which links to the server endpoint.
|
||||
std::shared_ptr<Session> parent;
|
||||
|
||||
@@ -44,7 +44,7 @@ ResultCode HandleTable::SetSize(s32 handle_table_size) {
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) {
|
||||
ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
|
||||
DEBUG_ASSERT(obj != nullptr);
|
||||
|
||||
const u16 slot = next_free_slot;
|
||||
@@ -70,7 +70,7 @@ ResultVal<Handle> HandleTable::Create(SharedPtr<Object> obj) {
|
||||
}
|
||||
|
||||
ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
|
||||
SharedPtr<Object> object = GetGeneric(handle);
|
||||
std::shared_ptr<Object> object = GetGeneric(handle);
|
||||
if (object == nullptr) {
|
||||
LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle);
|
||||
return ERR_INVALID_HANDLE;
|
||||
@@ -99,11 +99,11 @@ bool HandleTable::IsValid(Handle handle) const {
|
||||
return slot < table_size && objects[slot] != nullptr && generations[slot] == generation;
|
||||
}
|
||||
|
||||
SharedPtr<Object> HandleTable::GetGeneric(Handle handle) const {
|
||||
std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
|
||||
if (handle == CurrentThread) {
|
||||
return GetCurrentThread();
|
||||
return SharedFrom(GetCurrentThread());
|
||||
} else if (handle == CurrentProcess) {
|
||||
return Core::System::GetInstance().CurrentProcess();
|
||||
return SharedFrom(Core::System::GetInstance().CurrentProcess());
|
||||
}
|
||||
|
||||
if (!IsValid(handle)) {
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <memory>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/result.h"
|
||||
@@ -68,7 +70,7 @@ public:
|
||||
* @return The created Handle or one of the following errors:
|
||||
* - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded.
|
||||
*/
|
||||
ResultVal<Handle> Create(SharedPtr<Object> obj);
|
||||
ResultVal<Handle> Create(std::shared_ptr<Object> obj);
|
||||
|
||||
/**
|
||||
* Returns a new handle that points to the same object as the passed in handle.
|
||||
@@ -92,7 +94,7 @@ public:
|
||||
* Looks up a handle.
|
||||
* @return Pointer to the looked-up object, or `nullptr` if the handle is not valid.
|
||||
*/
|
||||
SharedPtr<Object> GetGeneric(Handle handle) const;
|
||||
std::shared_ptr<Object> GetGeneric(Handle handle) const;
|
||||
|
||||
/**
|
||||
* Looks up a handle while verifying its type.
|
||||
@@ -100,7 +102,7 @@ public:
|
||||
* type differs from the requested one.
|
||||
*/
|
||||
template <class T>
|
||||
SharedPtr<T> Get(Handle handle) const {
|
||||
std::shared_ptr<T> Get(Handle handle) const {
|
||||
return DynamicObjectCast<T>(GetGeneric(handle));
|
||||
}
|
||||
|
||||
@@ -109,7 +111,7 @@ public:
|
||||
|
||||
private:
|
||||
/// Stores the Object referenced by the handle or null if the slot is empty.
|
||||
std::array<SharedPtr<Object>, MAX_COUNT> objects;
|
||||
std::array<std::shared_ptr<Object>, MAX_COUNT> objects;
|
||||
|
||||
/**
|
||||
* The value of `next_generation` when the handle was created, used to check for validity. For
|
||||
|
||||
@@ -32,23 +32,25 @@ SessionRequestHandler::SessionRequestHandler() = default;
|
||||
|
||||
SessionRequestHandler::~SessionRequestHandler() = default;
|
||||
|
||||
void SessionRequestHandler::ClientConnected(SharedPtr<ServerSession> server_session) {
|
||||
void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) {
|
||||
server_session->SetHleHandler(shared_from_this());
|
||||
connected_sessions.push_back(std::move(server_session));
|
||||
}
|
||||
|
||||
void SessionRequestHandler::ClientDisconnected(const SharedPtr<ServerSession>& server_session) {
|
||||
void SessionRequestHandler::ClientDisconnected(
|
||||
const std::shared_ptr<ServerSession>& server_session) {
|
||||
server_session->SetHleHandler(nullptr);
|
||||
boost::range::remove_erase(connected_sessions, server_session);
|
||||
}
|
||||
|
||||
SharedPtr<WritableEvent> HLERequestContext::SleepClientThread(
|
||||
std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
|
||||
const std::string& reason, u64 timeout, WakeupCallback&& callback,
|
||||
SharedPtr<WritableEvent> writable_event) {
|
||||
std::shared_ptr<WritableEvent> writable_event) {
|
||||
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
|
||||
thread->SetWakeupCallback([context = *this, callback](
|
||||
ThreadWakeupReason reason, SharedPtr<Thread> thread,
|
||||
SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
|
||||
thread->SetWakeupCallback([context = *this, callback](ThreadWakeupReason reason,
|
||||
std::shared_ptr<Thread> thread,
|
||||
std::shared_ptr<WaitObject> object,
|
||||
std::size_t index) mutable -> bool {
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent);
|
||||
callback(thread, context, reason);
|
||||
context.WriteToOutgoingCommandBuffer(*thread);
|
||||
@@ -58,8 +60,7 @@ SharedPtr<WritableEvent> HLERequestContext::SleepClientThread(
|
||||
auto& kernel = Core::System::GetInstance().Kernel();
|
||||
if (!writable_event) {
|
||||
// Create event if not provided
|
||||
const auto pair = WritableEvent::CreateEventPair(kernel, ResetType::Automatic,
|
||||
"HLE Pause Event: " + reason);
|
||||
const auto pair = WritableEvent::CreateEventPair(kernel, "HLE Pause Event: " + reason);
|
||||
writable_event = pair.writable;
|
||||
}
|
||||
|
||||
@@ -73,11 +74,13 @@ SharedPtr<WritableEvent> HLERequestContext::SleepClientThread(
|
||||
thread->WakeAfterDelay(timeout);
|
||||
}
|
||||
|
||||
is_thread_waiting = true;
|
||||
|
||||
return writable_event;
|
||||
}
|
||||
|
||||
HLERequestContext::HLERequestContext(SharedPtr<Kernel::ServerSession> server_session,
|
||||
SharedPtr<Thread> thread)
|
||||
HLERequestContext::HLERequestContext(std::shared_ptr<Kernel::ServerSession> server_session,
|
||||
std::shared_ptr<Thread> thread)
|
||||
: server_session(std::move(server_session)), thread(std::move(thread)) {
|
||||
cmd_buf[0] = 0;
|
||||
}
|
||||
@@ -213,10 +216,11 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTabl
|
||||
ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
|
||||
auto& owner_process = *thread.GetOwnerProcess();
|
||||
auto& handle_table = owner_process.GetHandleTable();
|
||||
auto& memory = Core::System::GetInstance().Memory();
|
||||
|
||||
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmdbuf;
|
||||
Memory::ReadBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
|
||||
dst_cmdbuf.size() * sizeof(u32));
|
||||
memory.ReadBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
|
||||
dst_cmdbuf.size() * sizeof(u32));
|
||||
|
||||
// The header was already built in the internal command buffer. Attempt to parse it to verify
|
||||
// the integrity and then copy it over to the target command buffer.
|
||||
@@ -272,8 +276,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
|
||||
}
|
||||
|
||||
// Copy the translated command buffer back into the thread's command buffer area.
|
||||
Memory::WriteBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
|
||||
dst_cmdbuf.size() * sizeof(u32));
|
||||
memory.WriteBlock(owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
|
||||
dst_cmdbuf.size() * sizeof(u32));
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
@@ -281,15 +285,14 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
|
||||
std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const {
|
||||
std::vector<u8> buffer;
|
||||
const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()};
|
||||
auto& memory = Core::System::GetInstance().Memory();
|
||||
|
||||
if (is_buffer_a) {
|
||||
buffer.resize(BufferDescriptorA()[buffer_index].Size());
|
||||
Memory::ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(),
|
||||
buffer.size());
|
||||
memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
|
||||
} else {
|
||||
buffer.resize(BufferDescriptorX()[buffer_index].Size());
|
||||
Memory::ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(),
|
||||
buffer.size());
|
||||
memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
|
||||
}
|
||||
|
||||
return buffer;
|
||||
@@ -310,10 +313,11 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
|
||||
size = buffer_size; // TODO(bunnei): This needs to be HW tested
|
||||
}
|
||||
|
||||
auto& memory = Core::System::GetInstance().Memory();
|
||||
if (is_buffer_b) {
|
||||
Memory::WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
|
||||
memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
|
||||
} else {
|
||||
Memory::WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
|
||||
memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
|
||||
}
|
||||
|
||||
return size;
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
@@ -60,20 +61,20 @@ public:
|
||||
* associated ServerSession alive for the duration of the connection.
|
||||
* @param server_session Owning pointer to the ServerSession associated with the connection.
|
||||
*/
|
||||
void ClientConnected(SharedPtr<ServerSession> server_session);
|
||||
void ClientConnected(std::shared_ptr<ServerSession> server_session);
|
||||
|
||||
/**
|
||||
* Signals that a client has just disconnected from this HLE handler and releases the
|
||||
* associated ServerSession.
|
||||
* @param server_session ServerSession associated with the connection.
|
||||
*/
|
||||
void ClientDisconnected(const SharedPtr<ServerSession>& server_session);
|
||||
void ClientDisconnected(const std::shared_ptr<ServerSession>& server_session);
|
||||
|
||||
protected:
|
||||
/// List of sessions that are connected to this handler.
|
||||
/// A ServerSession whose server endpoint is an HLE implementation is kept alive by this list
|
||||
/// for the duration of the connection.
|
||||
std::vector<SharedPtr<ServerSession>> connected_sessions;
|
||||
std::vector<std::shared_ptr<ServerSession>> connected_sessions;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -97,7 +98,8 @@ protected:
|
||||
*/
|
||||
class HLERequestContext {
|
||||
public:
|
||||
explicit HLERequestContext(SharedPtr<ServerSession> session, SharedPtr<Thread> thread);
|
||||
explicit HLERequestContext(std::shared_ptr<ServerSession> session,
|
||||
std::shared_ptr<Thread> thread);
|
||||
~HLERequestContext();
|
||||
|
||||
/// Returns a pointer to the IPC command buffer for this request.
|
||||
@@ -109,12 +111,12 @@ public:
|
||||
* Returns the session through which this request was made. This can be used as a map key to
|
||||
* access per-client data on services.
|
||||
*/
|
||||
const SharedPtr<Kernel::ServerSession>& Session() const {
|
||||
const std::shared_ptr<Kernel::ServerSession>& Session() const {
|
||||
return server_session;
|
||||
}
|
||||
|
||||
using WakeupCallback = std::function<void(SharedPtr<Thread> thread, HLERequestContext& context,
|
||||
ThreadWakeupReason reason)>;
|
||||
using WakeupCallback = std::function<void(
|
||||
std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
|
||||
|
||||
/**
|
||||
* Puts the specified guest thread to sleep until the returned event is signaled or until the
|
||||
@@ -129,9 +131,9 @@ public:
|
||||
* created.
|
||||
* @returns Event that when signaled will resume the thread and call the callback function.
|
||||
*/
|
||||
SharedPtr<WritableEvent> SleepClientThread(const std::string& reason, u64 timeout,
|
||||
WakeupCallback&& callback,
|
||||
SharedPtr<WritableEvent> writable_event = nullptr);
|
||||
std::shared_ptr<WritableEvent> SleepClientThread(
|
||||
const std::string& reason, u64 timeout, WakeupCallback&& callback,
|
||||
std::shared_ptr<WritableEvent> writable_event = nullptr);
|
||||
|
||||
/// Populates this context with data from the requesting process/thread.
|
||||
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
|
||||
@@ -209,20 +211,20 @@ public:
|
||||
std::size_t GetWriteBufferSize(int buffer_index = 0) const;
|
||||
|
||||
template <typename T>
|
||||
SharedPtr<T> GetCopyObject(std::size_t index) {
|
||||
std::shared_ptr<T> GetCopyObject(std::size_t index) {
|
||||
return DynamicObjectCast<T>(copy_objects.at(index));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
SharedPtr<T> GetMoveObject(std::size_t index) {
|
||||
std::shared_ptr<T> GetMoveObject(std::size_t index) {
|
||||
return DynamicObjectCast<T>(move_objects.at(index));
|
||||
}
|
||||
|
||||
void AddMoveObject(SharedPtr<Object> object) {
|
||||
void AddMoveObject(std::shared_ptr<Object> object) {
|
||||
move_objects.emplace_back(std::move(object));
|
||||
}
|
||||
|
||||
void AddCopyObject(SharedPtr<Object> object) {
|
||||
void AddCopyObject(std::shared_ptr<Object> object) {
|
||||
copy_objects.emplace_back(std::move(object));
|
||||
}
|
||||
|
||||
@@ -262,15 +264,27 @@ public:
|
||||
|
||||
std::string Description() const;
|
||||
|
||||
Thread& GetThread() {
|
||||
return *thread;
|
||||
}
|
||||
|
||||
const Thread& GetThread() const {
|
||||
return *thread;
|
||||
}
|
||||
|
||||
bool IsThreadWaiting() const {
|
||||
return is_thread_waiting;
|
||||
}
|
||||
|
||||
private:
|
||||
void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
|
||||
|
||||
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
|
||||
SharedPtr<Kernel::ServerSession> server_session;
|
||||
SharedPtr<Thread> thread;
|
||||
std::shared_ptr<Kernel::ServerSession> server_session;
|
||||
std::shared_ptr<Thread> thread;
|
||||
// TODO(yuriks): Check common usage of this and optimize size accordingly
|
||||
boost::container::small_vector<SharedPtr<Object>, 8> move_objects;
|
||||
boost::container::small_vector<SharedPtr<Object>, 8> copy_objects;
|
||||
boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
|
||||
boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
|
||||
boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
|
||||
|
||||
std::optional<IPC::CommandHeader> command_header;
|
||||
@@ -288,6 +302,7 @@ private:
|
||||
u32_le command{};
|
||||
|
||||
std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
|
||||
bool is_thread_waiting{};
|
||||
};
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/core_timing_util.h"
|
||||
#include "core/hle/kernel/address_arbiter.h"
|
||||
#include "core/hle/kernel/client_port.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
@@ -40,7 +39,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
|
||||
// Lock the global kernel mutex when we enter the kernel HLE.
|
||||
std::lock_guard lock{HLE::g_hle_lock};
|
||||
|
||||
SharedPtr<Thread> thread =
|
||||
std::shared_ptr<Thread> thread =
|
||||
system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
|
||||
if (thread == nullptr) {
|
||||
LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
|
||||
@@ -53,7 +52,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
|
||||
thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
|
||||
// Remove the thread from each of its waiting objects' waitlists
|
||||
for (const auto& object : thread->GetWaitObjects()) {
|
||||
object->RemoveWaitingThread(thread.get());
|
||||
object->RemoveWaitingThread(thread);
|
||||
}
|
||||
thread->ClearWaitObjects();
|
||||
|
||||
@@ -64,8 +63,11 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
|
||||
} else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
|
||||
thread->GetStatus() == ThreadStatus::WaitCondVar) {
|
||||
thread->SetMutexWaitAddress(0);
|
||||
thread->SetCondVarWaitAddress(0);
|
||||
thread->SetWaitHandle(0);
|
||||
if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
|
||||
thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
|
||||
thread->SetCondVarWaitAddress(0);
|
||||
}
|
||||
|
||||
auto* const lock_owner = thread->GetLockOwner();
|
||||
// Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
|
||||
@@ -76,9 +78,9 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
|
||||
}
|
||||
}
|
||||
|
||||
if (thread->GetArbiterWaitAddress() != 0) {
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
|
||||
thread->SetArbiterWaitAddress(0);
|
||||
if (thread->GetStatus() == ThreadStatus::WaitArb) {
|
||||
auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter();
|
||||
address_arbiter.HandleWakeupThread(thread);
|
||||
}
|
||||
|
||||
if (resume) {
|
||||
@@ -136,12 +138,12 @@ struct KernelCore::Impl {
|
||||
|
||||
void InitializeThreads() {
|
||||
thread_wakeup_event_type =
|
||||
system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
|
||||
Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback);
|
||||
}
|
||||
|
||||
void InitializePreemption() {
|
||||
preemption_event = system.CoreTiming().RegisterEvent(
|
||||
"PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
|
||||
preemption_event =
|
||||
Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
|
||||
global_scheduler.PreemptThreads();
|
||||
s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
|
||||
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
|
||||
@@ -151,20 +153,31 @@ struct KernelCore::Impl {
|
||||
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
|
||||
}
|
||||
|
||||
void MakeCurrentProcess(Process* process) {
|
||||
current_process = process;
|
||||
|
||||
if (process == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
system.Memory().SetCurrentPageTable(*process);
|
||||
}
|
||||
|
||||
std::atomic<u32> next_object_id{0};
|
||||
std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
|
||||
std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
|
||||
std::atomic<u64> next_thread_id{1};
|
||||
|
||||
// Lists all processes that exist in the current session.
|
||||
std::vector<SharedPtr<Process>> process_list;
|
||||
std::vector<std::shared_ptr<Process>> process_list;
|
||||
Process* current_process = nullptr;
|
||||
Kernel::GlobalScheduler global_scheduler;
|
||||
|
||||
SharedPtr<ResourceLimit> system_resource_limit;
|
||||
std::shared_ptr<ResourceLimit> system_resource_limit;
|
||||
|
||||
std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
|
||||
std::shared_ptr<Core::Timing::EventType> preemption_event;
|
||||
|
||||
Core::Timing::EventType* thread_wakeup_event_type = nullptr;
|
||||
Core::Timing::EventType* preemption_event = nullptr;
|
||||
// TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
|
||||
// allowing us to simply use a pool index or similar.
|
||||
Kernel::HandleTable thread_wakeup_callback_handle_table;
|
||||
@@ -190,26 +203,21 @@ void KernelCore::Shutdown() {
|
||||
impl->Shutdown();
|
||||
}
|
||||
|
||||
SharedPtr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
|
||||
std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
|
||||
return impl->system_resource_limit;
|
||||
}
|
||||
|
||||
SharedPtr<Thread> KernelCore::RetrieveThreadFromWakeupCallbackHandleTable(Handle handle) const {
|
||||
std::shared_ptr<Thread> KernelCore::RetrieveThreadFromWakeupCallbackHandleTable(
|
||||
Handle handle) const {
|
||||
return impl->thread_wakeup_callback_handle_table.Get<Thread>(handle);
|
||||
}
|
||||
|
||||
void KernelCore::AppendNewProcess(SharedPtr<Process> process) {
|
||||
void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
|
||||
impl->process_list.push_back(std::move(process));
|
||||
}
|
||||
|
||||
void KernelCore::MakeCurrentProcess(Process* process) {
|
||||
impl->current_process = process;
|
||||
|
||||
if (process == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
Memory::SetCurrentPageTable(*process);
|
||||
impl->MakeCurrentProcess(process);
|
||||
}
|
||||
|
||||
Process* KernelCore::CurrentProcess() {
|
||||
@@ -220,7 +228,7 @@ const Process* KernelCore::CurrentProcess() const {
|
||||
return impl->current_process;
|
||||
}
|
||||
|
||||
const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const {
|
||||
const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const {
|
||||
return impl->process_list;
|
||||
}
|
||||
|
||||
@@ -232,7 +240,7 @@ const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
|
||||
return impl->global_scheduler;
|
||||
}
|
||||
|
||||
void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
|
||||
void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
|
||||
impl->named_ports.emplace(std::move(name), std::move(port));
|
||||
}
|
||||
|
||||
@@ -265,7 +273,7 @@ u64 KernelCore::CreateNewUserProcessID() {
|
||||
return impl->next_user_process_id++;
|
||||
}
|
||||
|
||||
Core::Timing::EventType* KernelCore::ThreadWakeupCallbackEventType() const {
|
||||
const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
|
||||
return impl->thread_wakeup_event_type;
|
||||
}
|
||||
|
||||
|
||||
@@ -4,8 +4,10 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
#include "core/hle/kernel/object.h"
|
||||
|
||||
namespace Core {
|
||||
@@ -30,7 +32,7 @@ class Thread;
|
||||
/// Represents a single instance of the kernel.
|
||||
class KernelCore {
|
||||
private:
|
||||
using NamedPortTable = std::unordered_map<std::string, SharedPtr<ClientPort>>;
|
||||
using NamedPortTable = std::unordered_map<std::string, std::shared_ptr<ClientPort>>;
|
||||
|
||||
public:
|
||||
/// Constructs an instance of the kernel using the given System
|
||||
@@ -56,13 +58,13 @@ public:
|
||||
void Shutdown();
|
||||
|
||||
/// Retrieves a shared pointer to the system resource limit instance.
|
||||
SharedPtr<ResourceLimit> GetSystemResourceLimit() const;
|
||||
std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const;
|
||||
|
||||
/// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
|
||||
SharedPtr<Thread> RetrieveThreadFromWakeupCallbackHandleTable(Handle handle) const;
|
||||
std::shared_ptr<Thread> RetrieveThreadFromWakeupCallbackHandleTable(Handle handle) const;
|
||||
|
||||
/// Adds the given shared pointer to an internal list of active processes.
|
||||
void AppendNewProcess(SharedPtr<Process> process);
|
||||
void AppendNewProcess(std::shared_ptr<Process> process);
|
||||
|
||||
/// Makes the given process the new current process.
|
||||
void MakeCurrentProcess(Process* process);
|
||||
@@ -74,7 +76,7 @@ public:
|
||||
const Process* CurrentProcess() const;
|
||||
|
||||
/// Retrieves the list of processes.
|
||||
const std::vector<SharedPtr<Process>>& GetProcessList() const;
|
||||
const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
|
||||
|
||||
/// Gets the sole instance of the global scheduler
|
||||
Kernel::GlobalScheduler& GlobalScheduler();
|
||||
@@ -83,7 +85,7 @@ public:
|
||||
const Kernel::GlobalScheduler& GlobalScheduler() const;
|
||||
|
||||
/// Adds a port to the named port table
|
||||
void AddNamedPort(std::string name, SharedPtr<ClientPort> port);
|
||||
void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
|
||||
|
||||
/// Finds a port within the named port table with the given name.
|
||||
NamedPortTable::iterator FindNamedPort(const std::string& name);
|
||||
@@ -112,7 +114,7 @@ private:
|
||||
u64 CreateNewThreadID();
|
||||
|
||||
/// Retrieves the event type used for thread wakeup callbacks.
|
||||
Core::Timing::EventType* ThreadWakeupCallbackEventType() const;
|
||||
const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const;
|
||||
|
||||
/// Provides a reference to the thread wakeup callback handle table.
|
||||
Kernel::HandleTable& ThreadWakeupCallbackHandleTable();
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
@@ -22,10 +23,10 @@ namespace Kernel {
|
||||
|
||||
/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
|
||||
/// those.
|
||||
static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
|
||||
const SharedPtr<Thread>& current_thread, VAddr mutex_addr) {
|
||||
static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
|
||||
const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {
|
||||
|
||||
SharedPtr<Thread> highest_priority_thread;
|
||||
std::shared_ptr<Thread> highest_priority_thread;
|
||||
u32 num_waiters = 0;
|
||||
|
||||
for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
|
||||
@@ -45,14 +46,14 @@ static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
|
||||
}
|
||||
|
||||
/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
|
||||
static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_thread,
|
||||
SharedPtr<Thread> new_owner) {
|
||||
static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
|
||||
std::shared_ptr<Thread> new_owner) {
|
||||
const auto threads = current_thread->GetMutexWaitingThreads();
|
||||
for (const auto& thread : threads) {
|
||||
if (thread->GetMutexWaitAddress() != mutex_addr)
|
||||
continue;
|
||||
|
||||
ASSERT(thread->GetLockOwner() == current_thread);
|
||||
ASSERT(thread->GetLockOwner() == current_thread.get());
|
||||
current_thread->RemoveMutexWaiter(thread);
|
||||
if (new_owner != thread)
|
||||
new_owner->AddMutexWaiter(thread);
|
||||
@@ -70,15 +71,16 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
|
||||
}
|
||||
|
||||
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
|
||||
Thread* const current_thread = system.CurrentScheduler().GetCurrentThread();
|
||||
SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
|
||||
SharedPtr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle);
|
||||
std::shared_ptr<Thread> current_thread =
|
||||
SharedFrom(system.CurrentScheduler().GetCurrentThread());
|
||||
std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
|
||||
std::shared_ptr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle);
|
||||
|
||||
// TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another
|
||||
// thread.
|
||||
ASSERT(requesting_thread == current_thread);
|
||||
|
||||
const u32 addr_value = Memory::Read32(address);
|
||||
const u32 addr_value = system.Memory().Read32(address);
|
||||
|
||||
// If the mutex isn't being held, just return success.
|
||||
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
|
||||
@@ -110,12 +112,13 @@ ResultCode Mutex::Release(VAddr address) {
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
|
||||
std::shared_ptr<Thread> current_thread =
|
||||
SharedFrom(system.CurrentScheduler().GetCurrentThread());
|
||||
auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
|
||||
|
||||
// There are no more threads waiting for the mutex, release it completely.
|
||||
if (thread == nullptr) {
|
||||
Memory::Write32(address, 0);
|
||||
system.Memory().Write32(address, 0);
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -130,7 +133,7 @@ ResultCode Mutex::Release(VAddr address) {
|
||||
}
|
||||
|
||||
// Grant the mutex to the next waiting thread and resume it.
|
||||
Memory::Write32(address, mutex_value);
|
||||
system.Memory().Write32(address, mutex_value);
|
||||
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
|
||||
thread->ResumeFromWait();
|
||||
|
||||
@@ -27,6 +27,7 @@ bool Object::IsWaitable() const {
case HandleType::ResourceLimit:
case HandleType::ClientPort:
case HandleType::ClientSession:
case HandleType::Session:
return false;
}
@@ -5,10 +5,9 @@
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include <boost/smart_ptr/intrusive_ptr.hpp>
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel {
|
||||
@@ -30,14 +29,10 @@ enum class HandleType : u32 {
|
||||
ServerPort,
|
||||
ClientSession,
|
||||
ServerSession,
|
||||
Session,
|
||||
};
|
||||
|
||||
enum class ResetType {
|
||||
Automatic, ///< Reset automatically on object acquisition
|
||||
Manual, ///< Never reset automatically
|
||||
};
|
||||
|
||||
class Object : NonCopyable {
|
||||
class Object : NonCopyable, public std::enable_shared_from_this<Object> {
|
||||
public:
|
||||
explicit Object(KernelCore& kernel);
|
||||
virtual ~Object();
|
||||
@@ -66,35 +61,24 @@ protected:
KernelCore& kernel;

private:
friend void intrusive_ptr_add_ref(Object*);
friend void intrusive_ptr_release(Object*);

std::atomic<u32> ref_count{0};
std::atomic<u32> object_id{0};
};

// Special functions used by boost::instrusive_ptr to do automatic ref-counting
inline void intrusive_ptr_add_ref(Object* object) {
object->ref_count.fetch_add(1, std::memory_order_relaxed);
}

inline void intrusive_ptr_release(Object* object) {
if (object->ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
delete object;
}
}

template <typename T>
using SharedPtr = boost::intrusive_ptr<T>;
std::shared_ptr<T> SharedFrom(T* raw) {
if (raw == nullptr)
return nullptr;
return std::static_pointer_cast<T>(raw->shared_from_this());
}

/**
* Attempts to downcast the given Object pointer to a pointer to T.
* @return Derived pointer to the object, or `nullptr` if `object` isn't of type T.
*/
template <typename T>
inline SharedPtr<T> DynamicObjectCast(SharedPtr<Object> object) {
inline std::shared_ptr<T> DynamicObjectCast(std::shared_ptr<Object> object) {
if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) {
return boost::static_pointer_cast<T>(object);
return std::static_pointer_cast<T>(object);
}
return nullptr;
}
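With Object now deriving from std::enable_shared_from_this, SharedFrom() is the bridge used wherever only a raw pointer is available, as in SharedFrom(system.CurrentScheduler().GetCurrentThread()) elsewhere in this comparison. A small usage sketch with assumed local names:

    // Sketch only: promote a raw Thread* to shared ownership, then recover the derived type.
    Kernel::Thread* raw_thread = system.CurrentScheduler().GetCurrentThread();
    std::shared_ptr<Kernel::Thread> thread = Kernel::SharedFrom(raw_thread);
    std::shared_ptr<Kernel::Object> as_object = thread;
    std::shared_ptr<Kernel::Thread> again = Kernel::DynamicObjectCast<Kernel::Thread>(as_object);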
@@ -38,7 +38,7 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) {
|
||||
auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0,
|
||||
owner_process.GetIdealCore(), stack_top, owner_process);
|
||||
|
||||
SharedPtr<Thread> thread = std::move(thread_res).Unwrap();
|
||||
std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();
|
||||
|
||||
// Register 1 must be a handle to the main thread
|
||||
const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
|
||||
@@ -100,10 +100,10 @@ private:
|
||||
std::bitset<num_slot_entries> is_slot_used;
|
||||
};
|
||||
|
||||
SharedPtr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) {
|
||||
std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) {
|
||||
auto& kernel = system.Kernel();
|
||||
|
||||
SharedPtr<Process> process(new Process(system));
|
||||
std::shared_ptr<Process> process = std::make_shared<Process>(system);
|
||||
process->name = std::move(name);
|
||||
process->resource_limit = kernel.GetSystemResourceLimit();
|
||||
process->status = ProcessStatus::Created;
|
||||
@@ -121,7 +121,7 @@ SharedPtr<Process> Process::Create(Core::System& system, std::string name, Proce
|
||||
return process;
|
||||
}
|
||||
|
||||
SharedPtr<ResourceLimit> Process::GetResourceLimit() const {
|
||||
std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
|
||||
return resource_limit;
|
||||
}
|
||||
|
||||
@@ -142,6 +142,49 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
|
||||
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
|
||||
}
|
||||
|
||||
void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
|
||||
VAddr cond_var_addr = thread->GetCondVarWaitAddress();
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
const std::shared_ptr<Thread> current_thread = *it;
|
||||
if (current_thread->GetPriority() > thread->GetPriority()) {
|
||||
thread_list.insert(it, thread);
|
||||
return;
|
||||
}
|
||||
++it;
|
||||
}
|
||||
thread_list.push_back(thread);
|
||||
}
|
||||
|
||||
void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
|
||||
VAddr cond_var_addr = thread->GetCondVarWaitAddress();
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
const std::shared_ptr<Thread> current_thread = *it;
|
||||
if (current_thread.get() == thread.get()) {
|
||||
thread_list.erase(it);
|
||||
return;
|
||||
}
|
||||
++it;
|
||||
}
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
|
||||
const VAddr cond_var_addr) {
|
||||
std::vector<std::shared_ptr<Thread>> result{};
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
std::shared_ptr<Thread> current_thread = *it;
|
||||
result.push_back(current_thread);
|
||||
++it;
|
||||
}
|
||||
return result;
|
||||
}
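Together these three helpers keep, per condition-variable address, a wait list ordered from highest to lowest priority, so a signaller can wake the most urgent waiter first. A sketch of how a signalling path might use them; SignalOne and the ResumeFromWait call are illustrative assumptions, not code from this change:

    // Illustrative only: wake the highest-priority thread waiting on cond_var_addr.
    void SignalOne(Process& process, VAddr cond_var_addr) {
        // GetConditionVariableThreads returns the list in stored order, which
        // InsertConditionVariableThread keeps sorted by descending priority.
        const auto waiters = process.GetConditionVariableThreads(cond_var_addr);
        if (waiters.empty()) {
            return;
        }
        const std::shared_ptr<Thread> winner = waiters.front();
        process.RemoveConditionVariableThread(winner);
        winner->ResumeFromWait(); // assumed wake-up primitive
    }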
|
||||
|
||||
void Process::RegisterThread(const Thread* thread) {
|
||||
thread_list.push_back(thread);
|
||||
}
|
||||
@@ -197,12 +240,12 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
|
||||
void Process::PrepareForTermination() {
|
||||
ChangeStatus(ProcessStatus::Exiting);
|
||||
|
||||
const auto stop_threads = [this](const std::vector<SharedPtr<Thread>>& thread_list) {
|
||||
const auto stop_threads = [this](const std::vector<std::shared_ptr<Thread>>& thread_list) {
|
||||
for (auto& thread : thread_list) {
|
||||
if (thread->GetOwnerProcess() != this)
|
||||
continue;
|
||||
|
||||
if (thread == system.CurrentScheduler().GetCurrentThread())
|
||||
if (thread.get() == system.CurrentScheduler().GetCurrentThread())
|
||||
continue;
|
||||
|
||||
// TODO(Subv): When are the other running/ready threads terminated?
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include <cstddef>
|
||||
#include <list>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/address_arbiter.h"
|
||||
@@ -61,6 +62,9 @@ enum class ProcessStatus {
|
||||
|
||||
class Process final : public WaitObject {
|
||||
public:
|
||||
explicit Process(Core::System& system);
|
||||
~Process() override;
|
||||
|
||||
enum : u64 {
|
||||
/// Lowest allowed process ID for a kernel initial process.
|
||||
InitialKIPIDMin = 1,
|
||||
@@ -81,7 +85,8 @@ public:
|
||||
|
||||
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
|
||||
|
||||
static SharedPtr<Process> Create(Core::System& system, std::string name, ProcessType type);
|
||||
static std::shared_ptr<Process> Create(Core::System& system, std::string name,
|
||||
ProcessType type);
|
||||
|
||||
std::string GetTypeName() const override {
|
||||
return "Process";
|
||||
@@ -156,7 +161,7 @@ public:
|
||||
}
|
||||
|
||||
/// Gets the resource limit descriptor for this process
|
||||
SharedPtr<ResourceLimit> GetResourceLimit() const;
|
||||
std::shared_ptr<ResourceLimit> GetResourceLimit() const;
|
||||
|
||||
/// Gets the ideal CPU core ID for this process
|
||||
u8 GetIdealCore() const {
|
||||
@@ -232,6 +237,15 @@ public:
|
||||
return thread_list;
|
||||
}
|
||||
|
||||
/// Insert a thread into the condition variable wait container
|
||||
void InsertConditionVariableThread(std::shared_ptr<Thread> thread);
|
||||
|
||||
/// Remove a thread from the condition variable wait container
|
||||
void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);
|
||||
|
||||
/// Obtain all condition variable threads waiting for some address
|
||||
std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);
|
||||
|
||||
/// Registers a thread as being created under this process,
|
||||
/// adding it to this process' thread list.
|
||||
void RegisterThread(const Thread* thread);
|
||||
@@ -287,9 +301,6 @@ public:
|
||||
void FreeTLSRegion(VAddr tls_address);
|
||||
|
||||
private:
|
||||
explicit Process(Core::System& system);
|
||||
~Process() override;
|
||||
|
||||
/// Checks if the specified thread should wait until this process is available.
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
|
||||
@@ -328,7 +339,7 @@ private:
|
||||
u32 system_resource_size = 0;
|
||||
|
||||
/// Resource limit descriptor for this process
|
||||
SharedPtr<ResourceLimit> resource_limit;
|
||||
std::shared_ptr<ResourceLimit> resource_limit;
|
||||
|
||||
/// The ideal CPU core for this process, threads are scheduled on this core by default.
|
||||
u8 ideal_core = 0;
|
||||
@@ -375,6 +386,9 @@ private:
|
||||
/// List of threads that are running with this process as their owner.
|
||||
std::list<const Thread*> thread_list;
|
||||
|
||||
/// List of threads waiting for a condition variable
|
||||
std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;
|
||||
|
||||
/// System context
|
||||
Core::System& system;
|
||||
|
||||
|
||||
@@ -20,15 +20,13 @@ bool ReadableEvent::ShouldWait(const Thread* thread) const {
|
||||
|
||||
void ReadableEvent::Acquire(Thread* thread) {
|
||||
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
|
||||
|
||||
if (reset_type == ResetType::Automatic) {
|
||||
signaled = false;
|
||||
}
|
||||
}
|
||||
|
||||
void ReadableEvent::Signal() {
|
||||
signaled = true;
|
||||
WakeupAllWaitingThreads();
|
||||
if (!signaled) {
|
||||
signaled = true;
|
||||
WakeupAllWaitingThreads();
|
||||
}
|
||||
}
|
||||
|
||||
void ReadableEvent::Clear() {
|
||||
|
||||
@@ -27,10 +27,6 @@ public:
|
||||
return name;
|
||||
}
|
||||
|
||||
ResetType GetResetType() const {
|
||||
return reset_type;
|
||||
}
|
||||
|
||||
static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
|
||||
HandleType GetHandleType() const override {
|
||||
return HANDLE_TYPE;
|
||||
@@ -55,8 +51,7 @@ private:
|
||||
|
||||
void Signal();
|
||||
|
||||
ResetType reset_type;
|
||||
bool signaled;
|
||||
bool signaled{};
|
||||
|
||||
std::string name; ///< Name of event (optional)
|
||||
};
|
||||
|
||||
@@ -16,8 +16,8 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
|
||||
ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
|
||||
ResourceLimit::~ResourceLimit() = default;
|
||||
|
||||
SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
|
||||
return new ResourceLimit(kernel);
|
||||
std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
|
||||
return std::make_shared<ResourceLimit>(kernel);
|
||||
}
|
||||
|
||||
s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <memory>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
|
||||
@@ -31,8 +33,11 @@ constexpr bool IsValidResourceType(ResourceType type) {
|
||||
|
||||
class ResourceLimit final : public Object {
|
||||
public:
|
||||
explicit ResourceLimit(KernelCore& kernel);
|
||||
~ResourceLimit() override;
|
||||
|
||||
/// Creates a resource limit object.
|
||||
static SharedPtr<ResourceLimit> Create(KernelCore& kernel);
|
||||
static std::shared_ptr<ResourceLimit> Create(KernelCore& kernel);
|
||||
|
||||
std::string GetTypeName() const override {
|
||||
return "ResourceLimit";
|
||||
@@ -76,9 +81,6 @@ public:
|
||||
ResultCode SetLimitValue(ResourceType resource, s64 value);
|
||||
|
||||
private:
|
||||
explicit ResourceLimit(KernelCore& kernel);
|
||||
~ResourceLimit() override;
|
||||
|
||||
// TODO(Subv): Increment resource limit current values in their respective Kernel::T::Create
|
||||
// functions
|
||||
//
|
||||
|
||||
@@ -22,44 +22,31 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {
|
||||
is_reselection_pending = false;
|
||||
}
|
||||
GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {}
|
||||
|
||||
void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
|
||||
GlobalScheduler::~GlobalScheduler() = default;
|
||||
|
||||
void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
|
||||
thread_list.push_back(std::move(thread));
|
||||
}
|
||||
|
||||
void GlobalScheduler::RemoveThread(const Thread* thread) {
|
||||
void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
|
||||
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
|
||||
thread_list.end());
|
||||
}
|
||||
|
||||
/*
|
||||
* UnloadThread selects a core and forces it to unload its current thread's context
|
||||
*/
|
||||
void GlobalScheduler::UnloadThread(s32 core) {
|
||||
void GlobalScheduler::UnloadThread(std::size_t core) {
|
||||
Scheduler& sched = system.Scheduler(core);
|
||||
sched.UnloadThread();
|
||||
}
|
||||
|
||||
/*
|
||||
* SelectThread takes care of selecting the new scheduled thread.
|
||||
* It does it in 3 steps:
|
||||
* - First a thread is selected from the top of the priority queue. If no thread
|
||||
* is obtained then we move to step two, else we are done.
|
||||
* - Second we try to get a suggested thread that's not assigned to any core or
|
||||
* that is not the top thread in that core.
|
||||
* - Third, if no suggested thread is found, we do a second pass and pick a running
|
||||
* thread in another core and swap it with its current thread.
|
||||
*/
|
||||
void GlobalScheduler::SelectThread(u32 core) {
|
||||
void GlobalScheduler::SelectThread(std::size_t core) {
|
||||
const auto update_thread = [](Thread* thread, Scheduler& sched) {
|
||||
if (thread != sched.selected_thread) {
|
||||
if (thread != sched.selected_thread.get()) {
|
||||
if (thread == nullptr) {
|
||||
++sched.idle_selection_count;
|
||||
}
|
||||
sched.selected_thread = thread;
|
||||
sched.selected_thread = SharedFrom(thread);
|
||||
}
|
||||
sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
|
||||
std::atomic_thread_fence(std::memory_order_seq_cst);
|
||||
@@ -90,9 +77,9 @@ void GlobalScheduler::SelectThread(u32 core) {
|
||||
// if we got a suggested thread, select it, else do a second pass.
|
||||
if (winner && winner->GetPriority() > 2) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
UnloadThread(static_cast<u32>(winner->GetProcessorID()));
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core, winner);
|
||||
TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner);
|
||||
update_thread(winner, sched);
|
||||
return;
|
||||
}
|
||||
@@ -104,9 +91,9 @@ void GlobalScheduler::SelectThread(u32 core) {
|
||||
Thread* thread_on_core = scheduled_queue[src_core].front();
|
||||
Thread* to_change = *it;
|
||||
if (thread_on_core->IsRunning() || to_change->IsRunning()) {
|
||||
UnloadThread(src_core);
|
||||
UnloadThread(static_cast<u32>(src_core));
|
||||
}
|
||||
TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
|
||||
TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core);
|
||||
current_thread = thread_on_core;
|
||||
break;
|
||||
}
|
||||
@@ -114,30 +101,19 @@ void GlobalScheduler::SelectThread(u32 core) {
|
||||
update_thread(current_thread, sched);
|
||||
}
|
||||
|
||||
/*
|
||||
* YieldThread takes a thread and moves it to the back of its priority list
|
||||
* This operation can be redundant and no scheduling is changed if marked as so.
|
||||
*/
|
||||
bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
|
||||
// Note: caller should use critical section, etc.
|
||||
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
|
||||
const u32 priority = yielding_thread->GetPriority();
|
||||
|
||||
// Yield the thread
|
||||
ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
|
||||
"Thread yielding without being in front");
|
||||
const Thread* const winner = scheduled_queue[core_id].front(priority);
|
||||
ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front");
|
||||
scheduled_queue[core_id].yield(priority);
|
||||
|
||||
Thread* winner = scheduled_queue[core_id].front(priority);
|
||||
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
|
||||
}
|
||||
|
||||
/*
|
||||
* YieldThreadAndBalanceLoad takes a thread and moves it to the back of its priority list.
|
||||
* Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
|
||||
* a better priority than the next thread in the core.
|
||||
* This operation can be redundant and no scheduling is changed if marked as so.
|
||||
*/
|
||||
bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
|
||||
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
|
||||
// etc.
|
||||
@@ -178,9 +154,9 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
|
||||
if (winner != nullptr) {
|
||||
if (winner != yielding_thread) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
UnloadThread(static_cast<u32>(winner->GetProcessorID()));
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core_id, winner);
|
||||
TransferToCore(winner->GetPriority(), s32(core_id), winner);
|
||||
}
|
||||
} else {
|
||||
winner = next_thread;
|
||||
@@ -189,12 +165,6 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
|
||||
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
|
||||
}
|
||||
|
||||
/*
|
||||
* YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue
|
||||
* and into the suggested queue. If no thread can be scheduled afterwards in that core,
|
||||
* a suggested thread is obtained instead.
|
||||
* This operation can be redundant and no scheduling is changed if marked as so.
|
||||
*/
|
||||
bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
|
||||
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
|
||||
// etc.
|
||||
@@ -226,9 +196,9 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
|
||||
if (winner != nullptr) {
|
||||
if (winner != yielding_thread) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
UnloadThread(static_cast<u32>(winner->GetProcessorID()));
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core_id, winner);
|
||||
TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
|
||||
}
|
||||
} else {
|
||||
winner = yielding_thread;
|
||||
@@ -278,9 +248,9 @@ void GlobalScheduler::PreemptThreads() {
|
||||
|
||||
if (winner != nullptr) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
UnloadThread(static_cast<u32>(winner->GetProcessorID()));
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core_id, winner);
|
||||
TransferToCore(winner->GetPriority(), s32(core_id), winner);
|
||||
current_thread =
|
||||
winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
|
||||
}
|
||||
@@ -311,9 +281,9 @@ void GlobalScheduler::PreemptThreads() {
|
||||
|
||||
if (winner != nullptr) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
UnloadThread(static_cast<u32>(winner->GetProcessorID()));
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core_id, winner);
|
||||
TransferToCore(winner->GetPriority(), s32(core_id), winner);
|
||||
current_thread = winner;
|
||||
}
|
||||
}
|
||||
@@ -322,30 +292,30 @@ void GlobalScheduler::PreemptThreads() {
|
||||
}
|
||||
}
|
||||
|
||||
void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) {
|
||||
void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
|
||||
suggested_queue[core].add(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) {
|
||||
void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
|
||||
suggested_queue[core].remove(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
|
||||
ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
|
||||
void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
|
||||
ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
|
||||
scheduled_queue[core].add(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
|
||||
ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
|
||||
void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
|
||||
ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
|
||||
scheduled_queue[core].add(thread, priority, false);
|
||||
}
|
||||
|
||||
void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) {
|
||||
void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
|
||||
scheduled_queue[core].remove(thread, priority);
|
||||
scheduled_queue[core].add(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) {
|
||||
void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
|
||||
scheduled_queue[core].remove(thread, priority);
|
||||
}
|
||||
|
||||
@@ -357,18 +327,19 @@ void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread*
|
||||
}
|
||||
thread->SetProcessorID(destination_core);
|
||||
if (source_core >= 0) {
|
||||
Unschedule(priority, source_core, thread);
|
||||
Unschedule(priority, static_cast<u32>(source_core), thread);
|
||||
}
|
||||
if (destination_core >= 0) {
|
||||
Unsuggest(priority, destination_core, thread);
|
||||
Schedule(priority, destination_core, thread);
|
||||
Unsuggest(priority, static_cast<u32>(destination_core), thread);
|
||||
Schedule(priority, static_cast<u32>(destination_core), thread);
|
||||
}
|
||||
if (source_core >= 0) {
|
||||
Suggest(priority, source_core, thread);
|
||||
Suggest(priority, static_cast<u32>(source_core), thread);
|
||||
}
|
||||
}
|
||||
|
||||
bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
|
||||
bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
|
||||
const Thread* winner) {
|
||||
if (current_thread == winner) {
|
||||
current_thread->IncrementYieldCount();
|
||||
return true;
|
||||
@@ -386,9 +357,7 @@ void GlobalScheduler::Shutdown() {
|
||||
thread_list.clear();
|
||||
}
|
||||
|
||||
GlobalScheduler::~GlobalScheduler() = default;
|
||||
|
||||
Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id)
|
||||
Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id)
|
||||
: system(system), cpu_core(cpu_core), core_id(core_id) {}
|
||||
|
||||
Scheduler::~Scheduler() = default;
|
||||
@@ -470,14 +439,14 @@ void Scheduler::SwitchContext() {
|
||||
|
||||
// Load context of new thread
|
||||
if (new_thread) {
|
||||
ASSERT_MSG(new_thread->GetProcessorID() == this->core_id,
|
||||
ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
|
||||
"Thread must be assigned to this core.");
|
||||
ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
|
||||
"Thread must be ready to become running.");
|
||||
|
||||
// Cancel any outstanding wakeup events for this thread
|
||||
new_thread->CancelWakeupTimer();
|
||||
current_thread = new_thread;
|
||||
current_thread = SharedFrom(new_thread);
|
||||
new_thread->SetStatus(ThreadStatus::Running);
|
||||
new_thread->SetIsRunning(true);
|
||||
|
||||
@@ -489,7 +458,6 @@ void Scheduler::SwitchContext() {
|
||||
cpu_core.LoadContext(new_thread->GetContext());
|
||||
cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
|
||||
cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
|
||||
cpu_core.ClearExclusiveState();
|
||||
} else {
|
||||
current_thread = nullptr;
|
||||
// Note: We do not reset the current process and current page table when idling because
|
||||
|
||||
@@ -4,11 +4,12 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <mutex>
|
||||
#include <atomic>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/multi_level_queue.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
|
||||
namespace Core {
|
||||
@@ -26,92 +27,100 @@ public:
|
||||
|
||||
explicit GlobalScheduler(Core::System& system);
|
||||
~GlobalScheduler();
|
||||
|
||||
/// Adds a new thread to the scheduler
|
||||
void AddThread(SharedPtr<Thread> thread);
|
||||
void AddThread(std::shared_ptr<Thread> thread);
|
||||
|
||||
/// Removes a thread from the scheduler
|
||||
void RemoveThread(const Thread* thread);
|
||||
void RemoveThread(std::shared_ptr<Thread> thread);
|
||||
|
||||
/// Returns a list of all threads managed by the scheduler
|
||||
const std::vector<SharedPtr<Thread>>& GetThreadList() const {
|
||||
const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
|
||||
return thread_list;
|
||||
}
|
||||
|
||||
// Add a thread to the suggested queue of a cpu core. Suggested threads may be
|
||||
// picked if no thread is scheduled to run on the core.
|
||||
void Suggest(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Remove a thread from the suggested queue of a cpu core. Suggested threads may be
|
||||
// picked if no thread is scheduled to run on the core.
|
||||
void Unsuggest(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Add a thread to the scheduling queue of a cpu core. The thread is added at the
|
||||
// back of the queue in its priority level
|
||||
void Schedule(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Add a thread to the scheduling queue of a cpu core. The thread is added at the
|
||||
// front of the queue in its priority level
|
||||
void SchedulePrepend(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Reschedule an already scheduled thread based on a new priority
|
||||
void Reschedule(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Unschedule a thread.
|
||||
void Unschedule(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Transfers a thread into a specific core. If the destination_core is -1
|
||||
// it will be unscheduled from its source core and added into its suggested
|
||||
// queue.
|
||||
void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
|
||||
|
||||
/*
|
||||
* UnloadThread selects a core and forces it to unload its current thread's context
|
||||
/**
|
||||
* Add a thread to the suggested queue of a cpu core. Suggested threads may be
|
||||
* picked if no thread is scheduled to run on the core.
|
||||
*/
|
||||
void UnloadThread(s32 core);
|
||||
void Suggest(u32 priority, std::size_t core, Thread* thread);
|
||||
|
||||
/*
|
||||
* SelectThread takes care of selecting the new scheduled thread.
|
||||
* It does it in 3 steps:
|
||||
* - First a thread is selected from the top of the priority queue. If no thread
|
||||
* is obtained then we move to step two, else we are done.
|
||||
* - Second we try to get a suggested thread that's not assigned to any core or
|
||||
* that is not the top thread in that core.
|
||||
* - Third, if no suggested thread is found, we do a second pass and pick a running
|
||||
* thread in another core and swap it with its current thread.
|
||||
/**
|
||||
* Remove a thread from the suggested queue of a cpu core. Suggested threads may be
|
||||
* picked if no thread is scheduled to run on the core.
|
||||
*/
|
||||
void SelectThread(u32 core);
|
||||
void Unsuggest(u32 priority, std::size_t core, Thread* thread);
|
||||
|
||||
bool HaveReadyThreads(u32 core_id) const {
|
||||
/**
|
||||
* Add a thread to the scheduling queue of a cpu core. The thread is added at the
|
||||
* back of the queue in its priority level.
|
||||
*/
|
||||
void Schedule(u32 priority, std::size_t core, Thread* thread);
|
||||
|
||||
/**
|
||||
* Add a thread to the scheduling queue of a cpu core. The thread is added at the
|
||||
* front of the queue in its priority level.
|
||||
*/
|
||||
void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
|
||||
|
||||
/// Reschedule an already scheduled thread based on a new priority
|
||||
void Reschedule(u32 priority, std::size_t core, Thread* thread);
|
||||
|
||||
/// Unschedules a thread.
|
||||
void Unschedule(u32 priority, std::size_t core, Thread* thread);
|
||||
|
||||
/// Selects a core and forces it to unload its current thread's context
|
||||
void UnloadThread(std::size_t core);
|
||||
|
||||
/**
|
||||
* Takes care of selecting the new scheduled thread in three steps:
|
||||
*
|
||||
* 1. First a thread is selected from the top of the priority queue. If no thread
|
||||
* is obtained then we move to step two, else we are done.
|
||||
*
|
||||
* 2. Second we try to get a suggested thread that's not assigned to any core or
|
||||
* that is not the top thread in that core.
|
||||
*
|
||||
* 3. Third, if no suggested thread is found, we do a second pass and pick a running
|
||||
* thread in another core and swap it with its current thread.
|
||||
*/
|
||||
void SelectThread(std::size_t core);
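Condensed into code, the three steps read roughly as below. This is a deliberately simplified, self-contained toy (plain std::deque run queues instead of Common::MultiLevelQueue, and step three elided), not the scheduler's actual implementation:

    #include <array>
    #include <cstddef>
    #include <deque>

    struct ToyThread {
        int priority;
        int assigned_core; // -1 means not assigned to any core
    };

    constexpr std::size_t NumCores = 4;
    std::array<std::deque<ToyThread*>, NumCores> scheduled; // per-core run queues
    std::array<std::deque<ToyThread*>, NumCores> suggested; // per-core suggestions

    ToyThread* Select(std::size_t core) {
        // 1. The highest-priority thread already scheduled on this core wins.
        if (!scheduled[core].empty()) {
            return scheduled[core].front();
        }
        // 2. Otherwise pick a suggested thread that is unassigned, or that is not
        //    the top thread of the core it currently belongs to.
        for (ToyThread* t : suggested[core]) {
            const int src = t->assigned_core;
            if (src < 0 || scheduled[src].empty() || scheduled[src].front() != t) {
                return t;
            }
        }
        // 3. A second pass would steal a running thread from another core and swap
        //    it with that core's current thread; the toy model simply idles here.
        return nullptr;
    }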
|
||||
|
||||
bool HaveReadyThreads(std::size_t core_id) const {
|
||||
return !scheduled_queue[core_id].empty();
|
||||
}
|
||||
|
||||
/*
|
||||
* YieldThread takes a thread and moves it to the back of its priority list
|
||||
* This operation can be redundant and no scheduling is changed if marked as so.
|
||||
/**
|
||||
* Takes a thread and moves it to the back of its priority list.
|
||||
*
|
||||
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
||||
*/
|
||||
bool YieldThread(Thread* thread);
|
||||
|
||||
/*
|
||||
* YieldThreadAndBalanceLoad takes a thread and moves it to the back of its priority list.
|
||||
/**
|
||||
* Takes a thread and moves it to the back of its priority list.
|
||||
* Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
|
||||
* a better priority than the next thread in the core.
|
||||
* This operation can be redundant and no scheduling is changed if marked as so.
|
||||
*
|
||||
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
||||
*/
|
||||
bool YieldThreadAndBalanceLoad(Thread* thread);
|
||||
|
||||
/*
|
||||
* YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue
|
||||
* and into the suggested queue. If no thread can be scheduled afterwards in that core,
|
||||
/**
|
||||
* Takes a thread and moves it out of the scheduling queue
|
||||
* and into the suggested queue. If no thread can be scheduled afterwards in that core,
|
||||
* a suggested thread is obtained instead.
|
||||
* This operation can be redundant and no scheduling is changed if marked as so.
|
||||
*
|
||||
* @note This operation can be redundant and no scheduling is changed if marked as so.
|
||||
*/
|
||||
bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
|
||||
|
||||
/*
|
||||
* PreemptThreads this operation rotates the scheduling queues of threads at
|
||||
* a preemption priority and then does some core rebalancing. Preemption priorities
|
||||
* can be found in the array 'preemption_priorities'. This operation happens
|
||||
* every 10ms.
|
||||
/**
|
||||
* Rotates the scheduling queues of threads at a preemption priority and then does
|
||||
* some core rebalancing. Preemption priorities can be found in the array
|
||||
* 'preemption_priorities'.
|
||||
*
|
||||
* @note This operation happens every 10ms.
|
||||
*/
|
||||
void PreemptThreads();
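Continuing the toy model above, the periodic preemption amounts to rotating the bucket at the preemption priority so equal-priority threads take turns. Again a sketch, not the real MultiLevelQueue::yield:

    // Rotate core's run queue once: the thread at the front goes to the back.
    void PreemptOnce(std::size_t core) {
        auto& queue = scheduled[core];
        if (queue.size() < 2) {
            return; // nothing to rotate
        }
        ToyThread* front = queue.front();
        queue.pop_front();
        queue.push_back(front);
    }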
|
||||
|
||||
@@ -130,25 +139,32 @@ public:
|
||||
void Shutdown();
|
||||
|
||||
private:
|
||||
bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
|
||||
/**
|
||||
* Transfers a thread into a specific core. If the destination_core is -1
|
||||
* it will be unscheduled from its source core and added into its suggested
|
||||
* queue.
|
||||
*/
|
||||
void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
|
||||
|
||||
bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
|
||||
|
||||
static constexpr u32 min_regular_priority = 2;
|
||||
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
|
||||
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
|
||||
std::atomic<bool> is_reselection_pending;
|
||||
std::atomic<bool> is_reselection_pending{false};
|
||||
|
||||
// `preemption_priorities` are the priority levels at which the global scheduler
|
||||
// preempts threads every 10 ms. They are ordered from Core 0 to Core 3
|
||||
// The priority levels at which the global scheduler preempts threads every 10 ms. They are
|
||||
// ordered from Core 0 to Core 3.
|
||||
std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
|
||||
|
||||
/// Lists all thread ids that aren't deleted/etc.
|
||||
std::vector<SharedPtr<Thread>> thread_list;
|
||||
std::vector<std::shared_ptr<Thread>> thread_list;
|
||||
Core::System& system;
|
||||
};
|
||||
|
||||
class Scheduler final {
|
||||
public:
|
||||
explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id);
|
||||
explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id);
|
||||
~Scheduler();
|
||||
|
||||
/// Returns whether there are any threads that are ready to run.
|
||||
@@ -181,10 +197,8 @@ public:
|
||||
|
||||
private:
|
||||
friend class GlobalScheduler;
|
||||
/**
|
||||
* Switches the CPU's active thread context to that of the specified thread
|
||||
* @param new_thread The thread to switch to
|
||||
*/
|
||||
|
||||
/// Switches the CPU's active thread context to that of the specified thread
|
||||
void SwitchContext();
|
||||
|
||||
/**
|
||||
@@ -200,14 +214,14 @@ private:
|
||||
*/
|
||||
void UpdateLastContextSwitchTime(Thread* thread, Process* process);
|
||||
|
||||
SharedPtr<Thread> current_thread = nullptr;
|
||||
SharedPtr<Thread> selected_thread = nullptr;
|
||||
std::shared_ptr<Thread> current_thread = nullptr;
|
||||
std::shared_ptr<Thread> selected_thread = nullptr;
|
||||
|
||||
Core::System& system;
|
||||
Core::ARM_Interface& cpu_core;
|
||||
u64 last_context_switch_time = 0;
|
||||
u64 idle_selection_count = 0;
|
||||
const u32 core_id;
|
||||
const std::size_t core_id;
|
||||
|
||||
bool is_context_switch_pending = false;
|
||||
};
|
||||
|
||||
@@ -16,7 +16,7 @@ namespace Kernel {
|
||||
ServerPort::ServerPort(KernelCore& kernel) : WaitObject{kernel} {}
|
||||
ServerPort::~ServerPort() = default;
|
||||
|
||||
ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() {
|
||||
ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
|
||||
if (pending_sessions.empty()) {
|
||||
return ERR_NOT_FOUND;
|
||||
}
|
||||
@@ -26,7 +26,7 @@ ResultVal<SharedPtr<ServerSession>> ServerPort::Accept() {
|
||||
return MakeResult(std::move(session));
|
||||
}
|
||||
|
||||
void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session) {
|
||||
void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
|
||||
pending_sessions.push_back(std::move(pending_session));
|
||||
}
|
||||
|
||||
@@ -41,8 +41,8 @@ void ServerPort::Acquire(Thread* thread) {
|
||||
|
||||
ServerPort::PortPair ServerPort::CreatePortPair(KernelCore& kernel, u32 max_sessions,
|
||||
std::string name) {
|
||||
SharedPtr<ServerPort> server_port(new ServerPort(kernel));
|
||||
SharedPtr<ClientPort> client_port(new ClientPort(kernel));
|
||||
std::shared_ptr<ServerPort> server_port = std::make_shared<ServerPort>(kernel);
|
||||
std::shared_ptr<ClientPort> client_port = std::make_shared<ClientPort>(kernel);
|
||||
|
||||
server_port->name = name + "_Server";
|
||||
client_port->name = name + "_Client";
|
||||
|
||||
@@ -22,8 +22,11 @@ class SessionRequestHandler;
|
||||
|
||||
class ServerPort final : public WaitObject {
|
||||
public:
|
||||
explicit ServerPort(KernelCore& kernel);
|
||||
~ServerPort() override;
|
||||
|
||||
using HLEHandler = std::shared_ptr<SessionRequestHandler>;
|
||||
using PortPair = std::pair<SharedPtr<ServerPort>, SharedPtr<ClientPort>>;
|
||||
using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>;
|
||||
|
||||
/**
|
||||
* Creates a pair of ServerPort and an associated ClientPort.
|
||||
@@ -52,7 +55,7 @@ public:
|
||||
* Accepts a pending incoming connection on this port. If there are no pending sessions, will
|
||||
* return ERR_NO_PENDING_SESSIONS.
|
||||
*/
|
||||
ResultVal<SharedPtr<ServerSession>> Accept();
|
||||
ResultVal<std::shared_ptr<ServerSession>> Accept();
|
||||
|
||||
/// Whether or not this server port has an HLE handler available.
|
||||
bool HasHLEHandler() const {
|
||||
@@ -74,17 +77,14 @@ public:
|
||||
|
||||
/// Appends a ServerSession to the collection of ServerSessions
|
||||
/// waiting to be accepted by this port.
|
||||
void AppendPendingSession(SharedPtr<ServerSession> pending_session);
|
||||
void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
private:
|
||||
explicit ServerPort(KernelCore& kernel);
|
||||
~ServerPort() override;
|
||||
|
||||
/// ServerSessions waiting to be accepted by the port
|
||||
std::vector<SharedPtr<ServerSession>> pending_sessions;
|
||||
std::vector<std::shared_ptr<ServerSession>> pending_sessions;
|
||||
|
||||
/// This session's HLE request handler template (optional)
|
||||
/// ServerSessions created from this port inherit a reference to this handler.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2016 Citra Emulator Project
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/hle/ipc_helpers.h"
|
||||
#include "core/hle/kernel/client_port.h"
|
||||
#include "core/hle/kernel/client_session.h"
|
||||
@@ -19,35 +20,32 @@
|
||||
#include "core/hle/kernel/server_session.h"
|
||||
#include "core/hle/kernel/session.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
ServerSession::ServerSession(KernelCore& kernel) : WaitObject{kernel} {}
|
||||
ServerSession::~ServerSession() {
|
||||
// This destructor will be called automatically when the last ServerSession handle is closed by
|
||||
// the emulated application.
|
||||
ServerSession::~ServerSession() = default;
|
||||
|
||||
// Decrease the port's connection count.
|
||||
if (parent->port) {
|
||||
parent->port->ConnectionClosed();
|
||||
}
|
||||
ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel,
|
||||
std::shared_ptr<Session> parent,
|
||||
std::string name) {
|
||||
std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};
|
||||
|
||||
parent->server = nullptr;
|
||||
}
|
||||
session->request_event = Core::Timing::CreateEvent(
|
||||
name, [session](u64 userdata, s64 cycles_late) { session->CompleteSyncRequest(); });
|
||||
session->name = std::move(name);
|
||||
session->parent = std::move(parent);
|
||||
|
||||
ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, std::string name) {
|
||||
SharedPtr<ServerSession> server_session(new ServerSession(kernel));
|
||||
|
||||
server_session->name = std::move(name);
|
||||
server_session->parent = nullptr;
|
||||
|
||||
return MakeResult(std::move(server_session));
|
||||
return MakeResult(std::move(session));
|
||||
}
|
||||
|
||||
bool ServerSession::ShouldWait(const Thread* thread) const {
|
||||
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
|
||||
if (parent->client == nullptr)
|
||||
if (!parent->Client()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Wait if we have no pending requests, or if we're currently handling a request.
|
||||
return pending_requesting_threads.empty() || currently_handling != nullptr;
|
||||
}
|
||||
@@ -69,7 +67,7 @@ void ServerSession::ClientDisconnected() {
|
||||
if (handler) {
|
||||
// Note that after this returns, this server session's hle_handler is
|
||||
// invalidated (set to null).
|
||||
handler->ClientDisconnected(this);
|
||||
handler->ClientDisconnected(SharedFrom(this));
|
||||
}
|
||||
|
||||
// Clean up the list of client threads with pending requests, they are unneeded now that the
|
||||
@@ -126,13 +124,21 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) {
|
||||
// The ServerSession received a sync request; this means that there's new data available
|
||||
// from its ClientSession, so wake up any threads that may be waiting on a svcReplyAndReceive or
|
||||
// similar.
|
||||
Kernel::HLERequestContext context(this, thread);
|
||||
u32* cmd_buf = (u32*)Memory::GetPointer(thread->GetTLSAddress());
|
||||
context.PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
|
||||
ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) {
|
||||
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
|
||||
std::shared_ptr<Kernel::HLERequestContext> context{
|
||||
std::make_shared<Kernel::HLERequestContext>(SharedFrom(this), std::move(thread))};
|
||||
|
||||
context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
|
||||
request_queue.Push(std::move(context));
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode ServerSession::CompleteSyncRequest() {
|
||||
ASSERT(!request_queue.Empty());
|
||||
|
||||
auto& context = *request_queue.Front();
|
||||
|
||||
ResultCode result = RESULT_SUCCESS;
|
||||
// If the session has been converted to a domain, handle the domain request
|
||||
@@ -144,61 +150,27 @@ ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) {
|
||||
result = hle_handler->HandleSyncRequest(context);
|
||||
}
|
||||
|
||||
if (thread->GetStatus() == ThreadStatus::Running) {
|
||||
// Put the thread to sleep until the server replies, it will be awoken in
|
||||
// svcReplyAndReceive for LLE servers.
|
||||
thread->SetStatus(ThreadStatus::WaitIPC);
|
||||
|
||||
if (hle_handler != nullptr) {
|
||||
// For HLE services, we put the request threads to sleep for a short duration to
|
||||
// simulate IPC overhead, but only if the HLE handler didn't put the thread to sleep for
|
||||
// other reasons like an async callback. The IPC overhead is needed to prevent
|
||||
// starvation when a thread only does sync requests to HLE services while a
|
||||
// lower-priority thread is waiting to run.
|
||||
|
||||
// This delay was approximated in a homebrew application by measuring the average time
|
||||
// it takes for svcSendSyncRequest to return when performing the SetLcdForceBlack IPC
|
||||
// request to the GSP:GPU service in a n3DS with firmware 11.6. The measured values have
|
||||
// a high variance and vary between models.
|
||||
static constexpr u64 IPCDelayNanoseconds = 39000;
|
||||
thread->WakeAfterDelay(IPCDelayNanoseconds);
|
||||
} else {
|
||||
// Add the thread to the list of threads that have issued a sync request with this
|
||||
// server.
|
||||
pending_requesting_threads.push_back(std::move(thread));
|
||||
}
|
||||
}
|
||||
|
||||
// If this ServerSession does not have an HLE implementation, just wake up the threads waiting
|
||||
// on it.
|
||||
WakeupAllWaitingThreads();
|
||||
|
||||
// Handle scenario when ConvertToDomain command was issued, as we must do the conversion at the
|
||||
// end of the command such that only commands following this one are handled as domains
|
||||
if (convert_to_domain) {
|
||||
ASSERT_MSG(IsSession(), "ServerSession is already a domain instance.");
|
||||
domain_request_handlers = {hle_handler};
|
||||
convert_to_domain = false;
|
||||
}
|
||||
|
||||
// Some service requests require the thread to block
|
||||
if (!context.IsThreadWaiting()) {
|
||||
context.GetThread().ResumeFromWait();
|
||||
context.GetThread().SetWaitSynchronizationResult(result);
|
||||
}
|
||||
|
||||
request_queue.Pop();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
ServerSession::SessionPair ServerSession::CreateSessionPair(KernelCore& kernel,
|
||||
const std::string& name,
|
||||
SharedPtr<ClientPort> port) {
|
||||
auto server_session = ServerSession::Create(kernel, name + "_Server").Unwrap();
|
||||
SharedPtr<ClientSession> client_session(new ClientSession(kernel));
|
||||
client_session->name = name + "_Client";
|
||||
|
||||
std::shared_ptr<Session> parent(new Session);
|
||||
parent->client = client_session.get();
|
||||
parent->server = server_session.get();
|
||||
parent->port = std::move(port);
|
||||
|
||||
client_session->parent = parent;
|
||||
server_session->parent = parent;
|
||||
|
||||
return std::make_pair(std::move(server_session), std::move(client_session));
|
||||
ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
|
||||
Memory::Memory& memory) {
|
||||
Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {});
|
||||
return QueueSyncRequest(std::move(thread), memory);
|
||||
}
|
||||
|
||||
} // namespace Kernel
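The reshaped request path above splits handling into two halves: HandleSyncRequest only queues an HLERequestContext and arms a core-timing event roughly 20000 cycles ahead, and CompleteSyncRequest later pops the queue, dispatches, and resumes the caller. A stripped-down sketch of that producer/consumer shape, with generic names standing in for the kernel types:

    #include <memory>
    #include <queue>

    struct Request { int command; };

    // Toy model of the two-phase request path. In the change above the "later"
    // call is driven by a Core::Timing event; here it is invoked manually.
    class ToySession {
    public:
        void HandleSyncRequest(Request req) {          // phase 1: queue only
            request_queue.push(std::make_shared<Request>(req));
        }

        void CompleteSyncRequest() {                   // phase 2: runs later
            if (request_queue.empty()) {
                return;
            }
            const std::shared_ptr<Request> context = request_queue.front();
            request_queue.pop();
            // ... dispatch *context to the HLE handler, then wake the caller ...
        }

    private:
        std::queue<std::shared_ptr<Request>> request_queue;
    };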
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
@@ -9,17 +9,22 @@
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "common/threadsafe_queue.h"
|
||||
#include "core/hle/kernel/wait_object.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Memory {
|
||||
class Memory;
|
||||
}
|
||||
|
||||
namespace Core::Timing {
|
||||
struct EventType;
|
||||
}
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class ClientPort;
|
||||
class ClientSession;
|
||||
class HLERequestContext;
|
||||
class KernelCore;
|
||||
class ServerSession;
|
||||
class Session;
|
||||
class SessionRequestHandler;
|
||||
class Thread;
|
||||
@@ -38,6 +43,15 @@ class Thread;
|
||||
*/
|
||||
class ServerSession final : public WaitObject {
|
||||
public:
|
||||
explicit ServerSession(KernelCore& kernel);
|
||||
~ServerSession() override;
|
||||
|
||||
friend class Session;
|
||||
|
||||
static ResultVal<std::shared_ptr<ServerSession>> Create(KernelCore& kernel,
|
||||
std::shared_ptr<Session> parent,
|
||||
std::string name = "Unknown");
|
||||
|
||||
std::string GetTypeName() const override {
|
||||
return "ServerSession";
|
||||
}
|
||||
@@ -59,18 +73,6 @@ public:
|
||||
return parent.get();
|
||||
}
|
||||
|
||||
using SessionPair = std::pair<SharedPtr<ServerSession>, SharedPtr<ClientSession>>;
|
||||
|
||||
/**
|
||||
* Creates a pair of ServerSession and an associated ClientSession.
|
||||
* @param kernel The kernel instance to create the session pair under.
|
||||
* @param name Optional name of the ports.
|
||||
* @param client_port Optional ClientPort that spawned this session.
|
||||
* @return The created session tuple
|
||||
*/
|
||||
static SessionPair CreateSessionPair(KernelCore& kernel, const std::string& name = "Unknown",
|
||||
SharedPtr<ClientPort> client_port = nullptr);
|
||||
|
||||
/**
|
||||
* Sets the HLE handler for the session. This handler will be called to service IPC requests
|
||||
* instead of the regular IPC machinery. (The regular IPC machinery is currently not
|
||||
@@ -82,10 +84,13 @@ public:
|
||||
|
||||
/**
|
||||
* Handle a sync request from the emulated application.
|
||||
*
|
||||
* @param thread Thread that initiated the request.
|
||||
* @param memory Memory context to handle the sync request under.
|
||||
*
|
||||
* @returns ResultCode from the operation.
|
||||
*/
|
||||
ResultCode HandleSyncRequest(SharedPtr<Thread> thread);
|
||||
ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory);
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
|
||||
@@ -118,18 +123,11 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
explicit ServerSession(KernelCore& kernel);
|
||||
~ServerSession() override;
|
||||
/// Queues a sync request from the emulated application.
|
||||
ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory);
|
||||
|
||||
/**
|
||||
* Creates a server session. The server session can have an optional HLE handler,
|
||||
* which will be invoked to handle the IPC requests that this session receives.
|
||||
* @param kernel The kernel instance to create this server session under.
|
||||
* @param name Optional name of the server session.
|
||||
* @return The created server session
|
||||
*/
|
||||
static ResultVal<SharedPtr<ServerSession>> Create(KernelCore& kernel,
|
||||
std::string name = "Unknown");
|
||||
/// Completes a sync request from the emulated application.
|
||||
ResultCode CompleteSyncRequest();
|
||||
|
||||
/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
|
||||
/// object handle.
|
||||
@@ -147,18 +145,24 @@ private:
|
||||
/// List of threads that are pending a response after a sync request. This list is processed in
|
||||
/// a LIFO manner, thus, the last request will be dispatched first.
|
||||
/// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
|
||||
std::vector<SharedPtr<Thread>> pending_requesting_threads;
|
||||
std::vector<std::shared_ptr<Thread>> pending_requesting_threads;
|
||||
|
||||
/// Thread whose request is currently being handled. A request is considered "handled" when a
|
||||
/// response is sent via svcReplyAndReceive.
|
||||
/// TODO(Subv): Find a better name for this.
|
||||
SharedPtr<Thread> currently_handling;
|
||||
std::shared_ptr<Thread> currently_handling;
|
||||
|
||||
/// When set to True, converts the session to a domain at the end of the command
|
||||
bool convert_to_domain{};
|
||||
|
||||
/// The name of this session (optional)
|
||||
std::string name;
|
||||
|
||||
/// Core timing event used to schedule the service request at some point in the future
|
||||
std::shared_ptr<Core::Timing::EventType> request_event;
|
||||
|
||||
/// Queue of scheduled service requests
|
||||
Common::MPSCQueue<std::shared_ptr<Kernel::HLERequestContext>> request_queue;
|
||||
};
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -1,12 +1,36 @@
|
||||
// Copyright 2015 Citra Emulator Project
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "core/hle/kernel/client_session.h"
|
||||
#include "core/hle/kernel/server_session.h"
|
||||
#include "core/hle/kernel/session.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
Session::Session() {}
|
||||
Session::~Session() {}
|
||||
Session::Session(KernelCore& kernel) : WaitObject{kernel} {}
|
||||
Session::~Session() = default;
|
||||
|
||||
Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
|
||||
auto session{std::make_shared<Session>(kernel)};
|
||||
auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()};
|
||||
auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()};
|
||||
|
||||
session->name = std::move(name);
|
||||
session->client = client_session;
|
||||
session->server = server_session;
|
||||
|
||||
return std::make_pair(std::move(client_session), std::move(server_session));
|
||||
}
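With the pair-returning factory above, call sites that previously went through ServerSession::CreateSessionPair can do roughly the following; the variable names and the comment about where each end goes are illustrative:

    // Illustrative call site for the new factory.
    auto [client_session, server_session] = Session::Create(kernel, "MyService");
    // The client end is handed to the emulated application (e.g. via a handle
    // table entry), while the server end stays with the HLE service code.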
|
||||
|
||||
bool Session::ShouldWait(const Thread* thread) const {
|
||||
UNIMPLEMENTED();
|
||||
return {};
|
||||
}
|
||||
|
||||
void Session::Acquire(Thread* thread) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -1,27 +1,64 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
#include "core/hle/kernel/wait_object.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class ClientSession;
|
||||
class ClientPort;
|
||||
class ServerSession;
|
||||
|
||||
/**
|
||||
* Parent structure to link the client and server endpoints of a session with their associated
|
||||
* client port. The client port need not exist, as is the case for portless sessions like the
|
||||
* FS File and Directory sessions. When one of the endpoints of a session is destroyed, its
|
||||
* corresponding field in this structure will be set to nullptr.
|
||||
* client port.
|
||||
*/
|
||||
class Session final {
|
||||
class Session final : public WaitObject {
|
||||
public:
|
||||
ClientSession* client = nullptr; ///< The client endpoint of the session.
|
||||
ServerSession* server = nullptr; ///< The server endpoint of the session.
|
||||
SharedPtr<ClientPort> port; ///< The port that this session is associated with (optional).
|
||||
explicit Session(KernelCore& kernel);
|
||||
~Session() override;
|
||||
|
||||
using SessionPair = std::pair<std::shared_ptr<ClientSession>, std::shared_ptr<ServerSession>>;
|
||||
|
||||
static SessionPair Create(KernelCore& kernel, std::string name = "Unknown");
|
||||
|
||||
std::string GetName() const override {
|
||||
return name;
|
||||
}
|
||||
|
||||
static constexpr HandleType HANDLE_TYPE = HandleType::Session;
|
||||
HandleType GetHandleType() const override {
|
||||
return HANDLE_TYPE;
|
||||
}
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
std::shared_ptr<ClientSession> Client() {
|
||||
if (auto result{client.lock()}) {
|
||||
return result;
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
std::shared_ptr<ServerSession> Server() {
|
||||
if (auto result{server.lock()}) {
|
||||
return result;
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
private:
|
||||
std::string name;
|
||||
std::weak_ptr<ClientSession> client;
|
||||
std::weak_ptr<ServerSession> server;
|
||||
};
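Holding the endpoints through std::weak_ptr while each endpoint owns the Session is what keeps the shared_ptr graph acyclic: the Session lives exactly as long as one of its ends does. A small, self-contained illustration of that ownership shape with generic names:

    #include <cassert>
    #include <memory>

    struct Endpoint;

    // The parent keeps only weak references to its two endpoints...
    struct Parent {
        std::weak_ptr<Endpoint> client;
        std::weak_ptr<Endpoint> server;
    };

    // ...while each endpoint owns the parent, so the parent lives exactly as long
    // as at least one endpoint does, and no shared_ptr cycle is ever formed.
    struct Endpoint {
        std::shared_ptr<Parent> parent;
    };

    int main() {
        auto parent = std::make_shared<Parent>();
        auto client = std::make_shared<Endpoint>();
        auto server = std::make_shared<Endpoint>();
        client->parent = parent;
        server->parent = parent;
        parent->client = client;
        parent->server = server;

        std::weak_ptr<Parent> watch = parent;
        parent.reset();                 // drop the local handle
        client.reset();                 // one endpoint closes
        assert(!watch.expired());       // parent still alive via the server side

        server.reset();                 // last endpoint closes
        assert(watch.expired());        // everything is reclaimed, no leak
    }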
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -15,11 +15,12 @@ namespace Kernel {
|
||||
SharedMemory::SharedMemory(KernelCore& kernel) : Object{kernel} {}
|
||||
SharedMemory::~SharedMemory() = default;
|
||||
|
||||
SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_process, u64 size,
|
||||
MemoryPermission permissions,
|
||||
MemoryPermission other_permissions, VAddr address,
|
||||
MemoryRegion region, std::string name) {
|
||||
SharedPtr<SharedMemory> shared_memory(new SharedMemory(kernel));
|
||||
std::shared_ptr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_process,
|
||||
u64 size, MemoryPermission permissions,
|
||||
MemoryPermission other_permissions,
|
||||
VAddr address, MemoryRegion region,
|
||||
std::string name) {
|
||||
std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel);
|
||||
|
||||
shared_memory->owner_process = owner_process;
|
||||
shared_memory->name = std::move(name);
|
||||
@@ -58,10 +59,10 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_
|
||||
return shared_memory;
|
||||
}
|
||||
|
||||
SharedPtr<SharedMemory> SharedMemory::CreateForApplet(
|
||||
std::shared_ptr<SharedMemory> SharedMemory::CreateForApplet(
|
||||
KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset,
|
||||
u64 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name) {
|
||||
SharedPtr<SharedMemory> shared_memory(new SharedMemory(kernel));
|
||||
std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel);
|
||||
|
||||
shared_memory->owner_process = nullptr;
|
||||
shared_memory->name = std::move(name);
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
@@ -33,6 +32,9 @@ enum class MemoryPermission : u32 {
|
||||
|
||||
class SharedMemory final : public Object {
|
||||
public:
|
||||
explicit SharedMemory(KernelCore& kernel);
|
||||
~SharedMemory() override;
|
||||
|
||||
/**
|
||||
* Creates a shared memory object.
|
||||
* @param kernel The kernel instance to create a shared memory instance under.
|
||||
@@ -46,11 +48,12 @@ public:
|
||||
* linear heap.
|
||||
* @param name Optional object name, used for debugging purposes.
|
||||
*/
|
||||
static SharedPtr<SharedMemory> Create(KernelCore& kernel, Process* owner_process, u64 size,
|
||||
MemoryPermission permissions,
|
||||
MemoryPermission other_permissions, VAddr address = 0,
|
||||
MemoryRegion region = MemoryRegion::BASE,
|
||||
std::string name = "Unknown");
|
||||
static std::shared_ptr<SharedMemory> Create(KernelCore& kernel, Process* owner_process,
|
||||
u64 size, MemoryPermission permissions,
|
||||
MemoryPermission other_permissions,
|
||||
VAddr address = 0,
|
||||
MemoryRegion region = MemoryRegion::BASE,
|
||||
std::string name = "Unknown");
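A hedged example of calling the reshaped factory; the size and permission choices are arbitrary and only meant to show the parameter order:

    // Illustrative only: one page of memory owned by the current process, writable
    // by the owner and read-only for any other process that maps it.
    auto shared_mem = SharedMemory::Create(kernel, kernel.CurrentProcess(), 0x1000,
                                           MemoryPermission::ReadWrite,
                                           MemoryPermission::Read);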
|
||||
|
||||
/**
|
||||
* Creates a shared memory object from a block of memory managed by an HLE applet.
|
||||
@@ -63,7 +66,7 @@ public:
|
||||
* block.
|
||||
* @param name Optional object name, used for debugging purposes.
|
||||
*/
|
||||
static SharedPtr<SharedMemory> CreateForApplet(
|
||||
static std::shared_ptr<SharedMemory> CreateForApplet(
|
||||
KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset,
|
||||
u64 size, MemoryPermission permissions, MemoryPermission other_permissions,
|
||||
std::string name = "Unknown Applet");
|
||||
@@ -130,9 +133,6 @@ public:
|
||||
const u8* GetPointer(std::size_t offset = 0) const;
|
||||
|
||||
private:
|
||||
explicit SharedMemory(KernelCore& kernel);
|
||||
~SharedMemory() override;
|
||||
|
||||
/// Backing memory for this shared memory block.
|
||||
std::shared_ptr<PhysicalMemory> backing_block;
|
||||
/// Offset into the backing block for this shared memory.
|
||||
@@ -17,6 +17,7 @@
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"

@@ -331,7 +332,9 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
/// Connect to an OS service given the port name, returns the handle to the port to out
static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
VAddr port_name_address) {
if (!Memory::IsValidVirtualAddress(port_name_address)) {
auto& memory = system.Memory();

if (!memory.IsValidVirtualAddress(port_name_address)) {
LOG_ERROR(Kernel_SVC,
"Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
port_name_address);

@@ -340,7 +343,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,

static constexpr std::size_t PortNameMaxLength = 11;
// Read 1 char beyond the max allowed port name to detect names that are too long.
std::string port_name = Memory::ReadCString(port_name_address, PortNameMaxLength + 1);
const std::string port_name = memory.ReadCString(port_name_address, PortNameMaxLength + 1);
if (port_name.size() > PortNameMaxLength) {
LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
port_name.size());

@@ -358,7 +361,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,

auto client_port = it->second;

SharedPtr<ClientSession> client_session;
std::shared_ptr<ClientSession> client_session;
CASCADE_RESULT(client_session, client_port->Connect());

// Return the client session

@@ -370,7 +373,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle,
/// Makes a blocking IPC call to an OS service.
static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
SharedPtr<ClientSession> session = handle_table.Get<ClientSession>(handle);
std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
if (!session) {
LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;

@@ -378,11 +381,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {

LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());

system.PrepareReschedule();
auto thread = system.CurrentScheduler().GetCurrentThread();
thread->InvalidateWakeupCallback();
thread->SetStatus(ThreadStatus::WaitIPC);
system.PrepareReschedule(thread->GetProcessorID());

// TODO(Subv): svcSendSyncRequest should put the caller thread to sleep while the server
// responds and cause a reschedule.
return session->SendSyncRequest(system.CurrentScheduler().GetCurrentThread());
return session->SendSyncRequest(SharedFrom(thread), system.Memory());
}
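Several of the new call sites, such as SendSyncRequest above, hand out std::shared_ptr obtained via SharedFrom(...) where the old code passed raw pointers or intrusive SharedPtrs. SharedFrom's definition is not part of this excerpt; a minimal sketch of what such a helper typically looks like, assuming the kernel objects now derive from std::enable_shared_from_this and are already owned by a shared_ptr when it is called:

#include <memory>

// Sketch only; the real SharedFrom lives elsewhere in the kernel sources.
template <typename T>
std::shared_ptr<T> SharedFrom(T* raw) {
    if (raw == nullptr) {
        return nullptr;
    }
    // shared_from_this() yields a shared_ptr to the enable_shared_from_this
    // base; cast it back to the concrete type the caller asked for.
    return std::static_pointer_cast<T>(raw->shared_from_this());
}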
/// Get the ID for the specified thread.

@@ -390,7 +394,7 @@ static ResultCode GetThreadId(Core::System& system, u64* thread_id, Handle threa
LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", thread_handle);
return ERR_INVALID_HANDLE;

@@ -405,13 +409,13 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const SharedPtr<Process> process = handle_table.Get<Process>(handle);
const std::shared_ptr<Process> process = handle_table.Get<Process>(handle);
if (process) {
*process_id = process->GetProcessID();
return RESULT_SUCCESS;
}

const SharedPtr<Thread> thread = handle_table.Get<Thread>(handle);
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
if (thread) {
const Process* const owner_process = thread->GetOwnerProcess();
if (!owner_process) {

@@ -430,8 +434,8 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
}

/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, std::size_t index) {
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object, std::size_t index) {
ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);

if (reason == ThreadWakeupReason::Timeout) {

@@ -451,7 +455,8 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
handles_address, handle_count, nano_seconds);

if (!Memory::IsValidVirtualAddress(handles_address)) {
auto& memory = system.Memory();
if (!memory.IsValidVirtualAddress(handles_address)) {
LOG_ERROR(Kernel_SVC,
"Handle address is not a valid virtual address, handle_address=0x{:016X}",
handles_address);

@@ -473,7 +478,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();

for (u64 i = 0; i < handle_count; ++i) {
const Handle handle = Memory::Read32(handles_address + i * sizeof(Handle));
const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
const auto object = handle_table.Get<WaitObject>(handle);

if (object == nullptr) {

@@ -505,8 +510,13 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
return RESULT_TIMEOUT;
}

if (thread->IsSyncCancelled()) {
thread->SetSyncCancelled(false);
return ERR_SYNCHRONIZATION_CANCELED;
}

for (auto& object : objects) {
object->AddWaitingThread(thread);
object->AddWaitingThread(SharedFrom(thread));
}

thread->SetWaitObjects(std::move(objects));

@@ -526,7 +536,7 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand
LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);

@@ -610,13 +620,15 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
return;
}

auto& memory = system.Memory();

// This typically is an error code so we're going to assume this is the case
if (sz == sizeof(u32)) {
LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", Memory::Read32(addr));
LOG_CRITICAL(Debug_Emulated, "debug_buffer_err_code={:X}", memory.Read32(addr));
} else {
// We don't know what's in here so we'll hexdump it
debug_buffer.resize(sz);
Memory::ReadBlock(addr, debug_buffer.data(), sz);
memory.ReadBlock(addr, debug_buffer.data(), sz);
std::string hexdump;
for (std::size_t i = 0; i < debug_buffer.size(); i++) {
hexdump += fmt::format("{:02X} ", debug_buffer[i]);

@@ -706,7 +718,7 @@ static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr addre
}

std::string str(len, '\0');
Memory::ReadBlock(address, str.data(), str.size());
system.Memory().ReadBlock(address, str.data(), str.size());
LOG_DEBUG(Debug_Emulated, "{}", str);
}

@@ -935,7 +947,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
const auto& core_timing = system.CoreTiming();
const auto& scheduler = system.CurrentScheduler();
const auto* const current_thread = scheduler.GetCurrentThread();
const bool same_thread = current_thread == thread;
const bool same_thread = current_thread == thread.get();

const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
u64 out_ticks = 0;

@@ -1045,7 +1057,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
}

const auto* current_process = system.Kernel().CurrentProcess();
const SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;

@@ -1061,7 +1073,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
return ERR_INVALID_HANDLE;
}

if (thread == system.CurrentScheduler().GetCurrentThread()) {
if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
return ERR_BUSY;
}

@@ -1077,7 +1089,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle);

const auto* current_process = system.Kernel().CurrentProcess();
const SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;

@@ -1093,7 +1105,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
return ERR_INVALID_HANDLE;
}

if (thread == system.CurrentScheduler().GetCurrentThread()) {
if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
return ERR_BUSY;
}

@@ -1109,7 +1121,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{});
}

Memory::WriteBlock(thread_context, &ctx, sizeof(ctx));
system.Memory().WriteBlock(thread_context, &ctx, sizeof(ctx));
return RESULT_SUCCESS;
}

@@ -1118,7 +1130,7 @@ static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle
LOG_TRACE(Kernel_SVC, "called");

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const SharedPtr<Thread> thread = handle_table.Get<Thread>(handle);
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;

@@ -1142,7 +1154,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri

const auto* const current_process = system.Kernel().CurrentProcess();

SharedPtr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
return ERR_INVALID_HANDLE;

@@ -1262,27 +1274,28 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
VAddr address) {
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
SharedPtr<Process> process = handle_table.Get<Process>(process_handle);
std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle);
if (!process) {
LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}",
process_handle);
return ERR_INVALID_HANDLE;
}

auto& memory = system.Memory();
const auto& vm_manager = process->VMManager();
const MemoryInfo memory_info = vm_manager.QueryMemory(address);

Memory::Write64(memory_info_address, memory_info.base_address);
Memory::Write64(memory_info_address + 8, memory_info.size);
Memory::Write32(memory_info_address + 16, memory_info.state);
Memory::Write32(memory_info_address + 20, memory_info.attributes);
Memory::Write32(memory_info_address + 24, memory_info.permission);
Memory::Write32(memory_info_address + 32, memory_info.ipc_ref_count);
Memory::Write32(memory_info_address + 28, memory_info.device_ref_count);
Memory::Write32(memory_info_address + 36, 0);
memory.Write64(memory_info_address, memory_info.base_address);
memory.Write64(memory_info_address + 8, memory_info.size);
memory.Write32(memory_info_address + 16, memory_info.state);
memory.Write32(memory_info_address + 20, memory_info.attributes);
memory.Write32(memory_info_address + 24, memory_info.permission);
memory.Write32(memory_info_address + 32, memory_info.ipc_ref_count);
memory.Write32(memory_info_address + 28, memory_info.device_ref_count);
memory.Write32(memory_info_address + 36, 0);

// Page info appears to be currently unused by the kernel and is always set to zero.
Memory::Write32(page_info_address, 0);
memory.Write32(page_info_address, 0);

return RESULT_SUCCESS;
}

@@ -1490,7 +1503,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
}

auto& kernel = system.Kernel();
CASCADE_RESULT(SharedPtr<Thread> thread,
CASCADE_RESULT(std::shared_ptr<Thread> thread,
Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top,
*current_process));

@@ -1516,7 +1529,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);

@@ -1540,7 +1553,7 @@ static void ExitThread(Core::System& system) {

auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
current_thread->Stop();
system.GlobalScheduler().RemoveThread(current_thread);
system.GlobalScheduler().RemoveThread(SharedFrom(current_thread));
system.PrepareReschedule();
}

@@ -1612,7 +1625,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add

auto* const current_process = system.Kernel().CurrentProcess();
const auto& handle_table = current_process->GetHandleTable();
SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
ASSERT(thread);

const auto release_result = current_process->GetMutex().Release(mutex_addr);

@@ -1620,12 +1633,13 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
return release_result;
}

SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread();
Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
current_thread->SetCondVarWaitAddress(condition_variable_addr);
current_thread->SetMutexWaitAddress(mutex_addr);
current_thread->SetWaitHandle(thread_handle);
current_thread->SetStatus(ThreadStatus::WaitCondVar);
current_thread->InvalidateWakeupCallback();
current_process->InsertConditionVariableThread(SharedFrom(current_thread));

current_thread->WakeAfterDelay(nano_seconds);

@@ -1636,50 +1650,35 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
}

/// Signal process wide key
static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr,
s32 target) {
static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) {
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
condition_variable_addr, target);

ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));

// Retrieve a list of all threads that are waiting for this condition variable.
std::vector<SharedPtr<Thread>> waiting_threads;
const auto& scheduler = system.GlobalScheduler();
const auto& thread_list = scheduler.GetThreadList();
auto* const current_process = system.Kernel().CurrentProcess();
std::vector<std::shared_ptr<Thread>> waiting_threads =
current_process->GetConditionVariableThreads(condition_variable_addr);

for (const auto& thread : thread_list) {
if (thread->GetCondVarWaitAddress() == condition_variable_addr) {
waiting_threads.push_back(thread);
}
}

// Sort them by priority, such that the highest priority ones come first.
std::sort(waiting_threads.begin(), waiting_threads.end(),
[](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
return lhs->GetPriority() < rhs->GetPriority();
});

// Only process up to 'target' threads, unless 'target' is -1, in which case process
// Only process up to 'target' threads, unless 'target' is less equal 0, in which case process
// them all.
std::size_t last = waiting_threads.size();
if (target != -1)
if (target > 0)
last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));

// If there are no threads waiting on this condition variable, just exit
if (last == 0)
return RESULT_SUCCESS;

for (std::size_t index = 0; index < last; ++index) {
auto& thread = waiting_threads[index];

ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);

// liberate Cond Var Thread.
current_process->RemoveConditionVariableThread(thread);
thread->SetCondVarWaitAddress(0);

const std::size_t current_core = system.CurrentCoreIndex();
auto& monitor = system.Monitor();
auto& memory = system.Memory();

// Atomically read the value of the mutex.
u32 mutex_val = 0;

@@ -1689,7 +1688,7 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
monitor.SetExclusive(current_core, mutex_address);

// If the mutex is not yet acquired, acquire it.
mutex_val = Memory::Read32(mutex_address);
mutex_val = memory.Read32(mutex_address);

if (mutex_val != 0) {
update_val = mutex_val | Mutex::MutexHasWaitersFlag;

@@ -1726,8 +1725,6 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
system.PrepareReschedule(thread->GetProcessorID());
}
}

return RESULT_SUCCESS;
}
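SignalProcessWideKey no longer walks the global scheduler's thread list and sorts it on every call; the waiters now come pre-ordered from the owning process via GetConditionVariableThreads, with InsertConditionVariableThread and RemoveConditionVariableThread keeping the bookkeeping up to date (the thread.cpp hunks below re-insert a waiter when its priority changes). The Process side of that bookkeeping is not part of this excerpt; a minimal sketch of a priority-ordered waiter list under those assumptions, using stand-in types rather than the real kernel classes:

#include <algorithm>
#include <cstdint>
#include <list>
#include <memory>
#include <vector>

using VAddr = std::uint64_t;

// Stand-in for the real Kernel::Thread; only what the sketch needs.
struct Thread {
    std::uint32_t priority = 0;
    VAddr cond_var_wait_address = 0;
    std::uint32_t GetPriority() const { return priority; }
    VAddr GetCondVarWaitAddress() const { return cond_var_wait_address; }
};

// Per-process list of condition-variable waiters kept in ascending priority
// value (lower value = higher priority), so the signalling path can take the
// first N matching entries without sorting.
class ConditionVariableWaiters {
public:
    void Insert(std::shared_ptr<Thread> thread) {
        const auto it = std::find_if(threads.begin(), threads.end(), [&](const auto& entry) {
            return entry->GetPriority() > thread->GetPriority();
        });
        threads.insert(it, std::move(thread));
    }

    void Remove(const std::shared_ptr<Thread>& thread) {
        threads.remove(thread);
    }

    // All waiters registered against a given condition-variable address.
    std::vector<std::shared_ptr<Thread>> Get(VAddr cond_var_addr) const {
        std::vector<std::shared_ptr<Thread>> result;
        for (const auto& entry : threads) {
            if (entry->GetCondVarWaitAddress() == cond_var_addr) {
                result.push_back(entry);
            }
        }
        return result;
    }

private:
    std::list<std::shared_ptr<Thread>> threads;
};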
// Wait for an address (via Address Arbiter)

@@ -1781,12 +1778,25 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type,
return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
}

static void KernelDebug([[maybe_unused]] Core::System& system,
[[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1,
[[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) {
// Intentionally do nothing, as this does nothing in released kernel binaries.
}

static void ChangeKernelTraceState([[maybe_unused]] Core::System& system,
[[maybe_unused]] u32 trace_state) {
// Intentionally do nothing, as this does nothing in released kernel binaries.
}

/// This returns the total CPU ticks elapsed since the CPU was powered-on
static u64 GetSystemTick(Core::System& system) {
LOG_TRACE(Kernel_SVC, "called");

auto& core_timing = system.CoreTiming();
const u64 result{core_timing.GetTicks()};

// Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick)
const u64 result{Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks())};

// Advance time to defeat dumb games that busy-wait for the frame to end.
core_timing.AddTicks(400);

@@ -1975,7 +1985,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);

const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);

@@ -2034,7 +2044,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
}

const auto& handle_table = current_process->GetHandleTable();
const SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
if (!thread) {
LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
thread_handle);

@@ -2099,7 +2109,7 @@ static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle

auto& kernel = system.Kernel();
const auto [readable_event, writable_event] =
WritableEvent::CreateEventPair(kernel, ResetType::Manual, "CreateEvent");
WritableEvent::CreateEventPair(kernel, "CreateEvent");

HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();

@@ -2290,12 +2300,13 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
return ERR_INVALID_ADDRESS_STATE;
}

auto& memory = system.Memory();
const auto& process_list = kernel.GetProcessList();
const auto num_processes = process_list.size();
const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);

for (std::size_t i = 0; i < copy_amount; ++i) {
Memory::Write64(out_process_ids, process_list[i]->GetProcessID());
memory.Write64(out_process_ids, process_list[i]->GetProcessID());
out_process_ids += sizeof(u64);
}

@@ -2329,13 +2340,14 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return ERR_INVALID_ADDRESS_STATE;
}

auto& memory = system.Memory();
const auto& thread_list = current_process->GetThreadList();
const auto num_threads = thread_list.size();
const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);

auto list_iter = thread_list.cbegin();
for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID());
memory.Write64(out_thread_ids, (*list_iter)->GetThreadID());
out_thread_ids += sizeof(u64);
}

@@ -2414,8 +2426,8 @@ static const FunctionDef SVC_Table[] = {
{0x39, nullptr, "Unknown"},
{0x3A, nullptr, "Unknown"},
{0x3B, nullptr, "Unknown"},
{0x3C, nullptr, "DumpInfo"},
{0x3D, nullptr, "DumpInfoNew"},
{0x3C, SvcWrap<KernelDebug>, "KernelDebug"},
{0x3D, SvcWrap<ChangeKernelTraceState>, "ChangeKernelTraceState"},
{0x3E, nullptr, "Unknown"},
{0x3F, nullptr, "Unknown"},
{0x40, nullptr, "CreateSession"},

@@ -112,11 +112,6 @@ void SvcWrap(Core::System& system) {
FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64, s32)>
void SvcWrap(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), static_cast<s32>(Param(system, 1))).raw);
}

template <ResultCode func(Core::System&, u64, u32)>
void SvcWrap(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))).raw);

@@ -311,11 +306,27 @@ void SvcWrap(Core::System& system) {
func(system);
}

template <void func(Core::System&, u32)>
void SvcWrap(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)));
}

template <void func(Core::System&, u32, u64, u64, u64)>
void SvcWrap(Core::System& system) {
func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2),
Param(system, 3));
}

template <void func(Core::System&, s64)>
void SvcWrap(Core::System& system) {
func(system, static_cast<s64>(Param(system, 0)));
}

template <void func(Core::System&, u64, s32)>
void SvcWrap(Core::System& system) {
func(system, Param(system, 0), static_cast<s32>(Param(system, 1)));
}

template <void func(Core::System&, u64, u64)>
void SvcWrap(Core::System& system) {
func(system, Param(system, 0), Param(system, 1));
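The new void-returning SvcWrap overloads above exist because calls such as KernelDebug, ChangeKernelTraceState and the reworked SignalProcessWideKey no longer produce a ResultCode, so there is nothing to feed back through FuncReturn. A stripped-down sketch of that dispatch pattern follows; the System, Param and FuncReturn stand-ins are simplified placeholders, not the real svc_wrap.h interfaces:

#include <cstdint>
#include <iostream>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

// Simplified stand-in for the emulated guest register file.
struct System {
    u64 regs[8] = {};
};

static u64 Param(const System& system, int n) {
    return system.regs[n];
}

static void FuncReturn(System& system, u64 result) {
    system.regs[0] = result;
}

// Wrapper for an SVC that returns a result: write it back to the guest.
template <u64 func(System&, u64, u32)>
void SvcWrap(System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))));
}

// Wrapper for an SVC that returns nothing: no FuncReturn call at all.
template <void func(System&, u64, u32)>
void SvcWrap(System& system) {
    func(system, Param(system, 0), static_cast<u32>(Param(system, 1)));
}

static u64 SvcWithResult(System&, u64 address, u32 size) {
    std::cout << "svc with result: addr=" << address << " size=" << size << '\n';
    return 0; // success
}

static void SvcWithoutResult(System&, u64 address, u32 value) {
    std::cout << "svc without result: addr=" << address << " value=" << value << '\n';
}

int main() {
    System system;
    system.regs[0] = 0x1000;
    system.regs[1] = 4;
    SvcWrap<SvcWithResult>(system);    // result lands back in regs[0]
    SvcWrap<SvcWithoutResult>(system); // registers are not written by the wrapper
}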
@@ -50,7 +50,7 @@ void Thread::Stop() {

// Clean up any dangling references in objects that this thread was waiting for
for (auto& wait_object : wait_objects) {
wait_object->RemoveWaitingThread(this);
wait_object->RemoveWaitingThread(SharedFrom(this));
}
wait_objects.clear();

@@ -77,18 +77,6 @@ void Thread::CancelWakeupTimer() {
callback_handle);
}

static std::optional<s32> GetNextProcessorId(u64 mask) {
for (s32 index = 0; index < Core::NUM_CPU_CORES; ++index) {
if (mask & (1ULL << index)) {
if (!Core::System::GetInstance().Scheduler(index).GetCurrentThread()) {
// Core is enabled and not running any threads, use this one
return index;
}
}
}
return {};
}

void Thread::ResumeFromWait() {
ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects");

@@ -132,8 +120,11 @@ void Thread::ResumeFromWait() {
}

void Thread::CancelWait() {
ASSERT(GetStatus() == ThreadStatus::WaitSynch);
ClearWaitObjects();
if (GetSchedulingStatus() != ThreadSchedStatus::Paused) {
is_sync_cancelled = true;
return;
}
is_sync_cancelled = false;
SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED);
ResumeFromWait();
}

@@ -156,9 +147,10 @@ static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAdd
context.fpcr = 0x03C00000;
}

ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id,
VAddr stack_top, Process& owner_process) {
ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name,
VAddr entry_point, u32 priority, u64 arg,
s32 processor_id, VAddr stack_top,
Process& owner_process) {
// Check if priority is in ranged. Lowest priority -> highest priority id.
if (priority > THREADPRIO_LOWEST) {
LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);

@@ -170,14 +162,14 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
return ERR_INVALID_PROCESSOR_ID;
}

if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) {
auto& system = Core::System::GetInstance();
if (!system.Memory().IsValidVirtualAddress(owner_process, entry_point)) {
LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
// TODO (bunnei): Find the correct error code to use here
return ResultCode(-1);
return RESULT_UNKNOWN;
}

auto& system = Core::System::GetInstance();
SharedPtr<Thread> thread(new Thread(kernel));
std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);

thread->thread_id = kernel.CreateNewThreadID();
thread->status = ThreadStatus::Dormant;

@@ -206,7 +198,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
// to initialize the context
ResetThreadContext(thread->context, stack_top, entry_point, arg);

return MakeResult<SharedPtr<Thread>>(std::move(thread));
return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
}
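The switch from SharedPtr<Thread> thread(new Thread(kernel)) to std::make_shared<Thread>(kernel) is what forces the constructor and destructor to become public, as the thread.h hunk further down shows: std::make_shared constructs the object itself and therefore needs an accessible constructor. A small, self-contained illustration of that constraint, with generic names rather than the kernel types:

#include <memory>

class Widget {
public:
    // std::make_shared constructs the Widget itself, so this constructor must
    // be reachable from outside the class; if it were private, the make_shared
    // call below would fail to compile.
    explicit Widget(int value) : value{value} {}

private:
    int value;
};

int main() {
    // One allocation holding both the control block and the Widget.
    auto widget = std::make_shared<Widget>(42);

    // The older pattern: separate allocation, then adoption by the smart pointer.
    std::shared_ptr<Widget> legacy(new Widget(7));
}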
void Thread::SetPriority(u32 priority) {

@@ -224,7 +216,7 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
context.cpu_registers[1] = output;
}

s32 Thread::GetWaitObjectIndex(const WaitObject* object) const {
s32 Thread::GetWaitObjectIndex(std::shared_ptr<WaitObject> object) const {
ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);

@@ -264,8 +256,8 @@ void Thread::SetStatus(ThreadStatus new_status) {
status = new_status;
}

void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
if (thread->lock_owner == this) {
void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
if (thread->lock_owner.get() == this) {
// If the thread is already waiting for this thread to release the mutex, ensure that the
// waiters list is consistent and return without doing anything.
const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);

@@ -285,13 +277,13 @@ void Thread::AddMutexWaiter(SharedPtr<Thread> thread) {
wait_mutex_threads.begin(), wait_mutex_threads.end(),
[&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
wait_mutex_threads.insert(insertion_point, thread);
thread->lock_owner = this;
thread->lock_owner = SharedFrom(this);

UpdatePriority();
}

void Thread::RemoveMutexWaiter(SharedPtr<Thread> thread) {
ASSERT(thread->lock_owner == this);
void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
ASSERT(thread->lock_owner.get() == this);

// Ensure that the thread is in the list of mutex waiters
const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);

@@ -318,16 +310,24 @@ void Thread::UpdatePriority() {
return;
}

if (GetStatus() == ThreadStatus::WaitCondVar) {
owner_process->RemoveConditionVariableThread(SharedFrom(this));
}

SetCurrentPriority(new_priority);

if (GetStatus() == ThreadStatus::WaitCondVar) {
owner_process->InsertConditionVariableThread(SharedFrom(this));
}

if (!lock_owner) {
return;
}

// Ensure that the thread is within the correct location in the waiting list.
auto old_owner = lock_owner;
lock_owner->RemoveMutexWaiter(this);
old_owner->AddMutexWaiter(this);
lock_owner->RemoveMutexWaiter(SharedFrom(this));
old_owner->AddMutexWaiter(SharedFrom(this));

// Recursively update the priority of the thread that depends on the priority of this one.
lock_owner->UpdatePriority();

@@ -340,11 +340,11 @@ void Thread::ChangeCore(u32 core, u64 mask) {
bool Thread::AllWaitObjectsReady() const {
return std::none_of(
wait_objects.begin(), wait_objects.end(),
[this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
[this](const std::shared_ptr<WaitObject>& object) { return object->ShouldWait(this); });
}

bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, std::size_t index) {
bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object, std::size_t index) {
ASSERT(wakeup_callback);
return wakeup_callback(reason, std::move(thread), std::move(object), index);
}

@@ -401,7 +401,7 @@ void Thread::SetCurrentPriority(u32 new_priority) {

ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
const auto HighestSetCore = [](u64 mask, u32 max_cores) {
for (s32 core = max_cores - 1; core >= 0; core--) {
for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
if (((mask >> core) & 1) != 0) {
return core;
}

@@ -425,7 +425,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
if (old_affinity_mask != new_affinity_mask) {
const s32 old_core = processor_id;
if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
if (ideal_core < 0) {
if (static_cast<s32>(ideal_core) < 0) {
processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
} else {
processor_id = ideal_core;

@@ -447,23 +447,23 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
ThreadSchedStatus::Runnable) {
// In this case the thread was running, now it's pausing/exitting
if (processor_id >= 0) {
scheduler.Unschedule(current_priority, processor_id, this);
scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
}

for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
scheduler.Unsuggest(current_priority, static_cast<u32>(core), this);
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
scheduler.Unsuggest(current_priority, core, this);
}
}
} else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
// The thread is now set to running from being stopped
if (processor_id >= 0) {
scheduler.Schedule(current_priority, processor_id, this);
scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
}

for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
scheduler.Suggest(current_priority, static_cast<u32>(core), this);
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
scheduler.Suggest(current_priority, core, this);
}
}
}

@@ -477,11 +477,11 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
}
auto& scheduler = Core::System::GetInstance().GlobalScheduler();
if (processor_id >= 0) {
scheduler.Unschedule(old_priority, processor_id, this);
scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
}

for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
scheduler.Unsuggest(old_priority, core, this);
}
}

@@ -491,14 +491,14 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {

if (processor_id >= 0) {
if (current_thread == this) {
scheduler.SchedulePrepend(current_priority, processor_id, this);
scheduler.SchedulePrepend(current_priority, static_cast<u32>(processor_id), this);
} else {
scheduler.Schedule(current_priority, processor_id, this);
scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
}
}

for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
scheduler.Suggest(current_priority, core, this);
}
}

@@ -515,7 +515,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {

for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (((old_affinity_mask >> core) & 1) != 0) {
if (core == old_core) {
if (core == static_cast<u32>(old_core)) {
scheduler.Unschedule(current_priority, core, this);
} else {
scheduler.Unsuggest(current_priority, core, this);

@@ -525,7 +525,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {

for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (((affinity_mask >> core) & 1) != 0) {
if (core == processor_id) {
if (core == static_cast<u32>(processor_id)) {
scheduler.Schedule(current_priority, core, this);
} else {
scheduler.Suggest(current_priority, core, this);

@@ -97,14 +97,18 @@ enum class ThreadSchedMasks : u32 {

class Thread final : public WaitObject {
public:
using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
explicit Thread(KernelCore& kernel);
~Thread() override;

using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>;

using ThreadContext = Core::ARM_Interface::ThreadContext;

using ThreadWaitObjects = std::vector<SharedPtr<WaitObject>>;
using ThreadWaitObjects = std::vector<std::shared_ptr<WaitObject>>;

using WakeupCallback = std::function<bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, std::size_t index)>;
using WakeupCallback =
std::function<bool(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object, std::size_t index)>;

/**
* Creates and returns a new thread. The new thread is immediately scheduled

@@ -118,10 +122,10 @@ public:
* @param owner_process The parent process for the thread
* @return A shared pointer to the newly created thread
*/
static ResultVal<SharedPtr<Thread>> Create(KernelCore& kernel, std::string name,
VAddr entry_point, u32 priority, u64 arg,
s32 processor_id, VAddr stack_top,
Process& owner_process);
static ResultVal<std::shared_ptr<Thread>> Create(KernelCore& kernel, std::string name,
VAddr entry_point, u32 priority, u64 arg,
s32 processor_id, VAddr stack_top,
Process& owner_process);

std::string GetName() const override {
return name;

@@ -166,10 +170,10 @@ public:
void SetPriority(u32 priority);

/// Adds a thread to the list of threads that are waiting for a lock held by this thread.
void AddMutexWaiter(SharedPtr<Thread> thread);
void AddMutexWaiter(std::shared_ptr<Thread> thread);

/// Removes a thread from the list of threads that are waiting for a lock held by this thread.
void RemoveMutexWaiter(SharedPtr<Thread> thread);
void RemoveMutexWaiter(std::shared_ptr<Thread> thread);

/// Recalculates the current priority taking into account priority inheritance.
void UpdatePriority();

@@ -229,7 +233,7 @@ public:
*
* @param object Object to query the index of.
*/
s32 GetWaitObjectIndex(const WaitObject* object) const;
s32 GetWaitObjectIndex(std::shared_ptr<WaitObject> object) const;

/**
* Stops a thread, invalidating it from further use

@@ -320,7 +324,7 @@ public:

void ClearWaitObjects() {
for (const auto& waiting_object : wait_objects) {
waiting_object->RemoveWaitingThread(this);
waiting_object->RemoveWaitingThread(SharedFrom(this));
}
wait_objects.clear();
}

@@ -336,7 +340,7 @@ public:
return lock_owner.get();
}

void SetLockOwner(SharedPtr<Thread> owner) {
void SetLockOwner(std::shared_ptr<Thread> owner) {
lock_owner = std::move(owner);
}

@@ -390,8 +394,8 @@ public:
* @pre A valid wakeup callback has been set. Violating this precondition
* will cause an assertion to trigger.
*/
bool InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, std::size_t index);
bool InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
std::shared_ptr<WaitObject> object, std::size_t index);

u32 GetIdealCore() const {
return ideal_core;

@@ -440,10 +444,15 @@ public:
is_running = value;
}

private:
explicit Thread(KernelCore& kernel);
~Thread() override;
bool IsSyncCancelled() const {
return is_sync_cancelled;
}

void SetSyncCancelled(bool value) {
is_sync_cancelled = value;
}

private:
void SetSchedulingStatus(ThreadSchedStatus new_status);
void SetCurrentPriority(u32 new_priority);
ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);

@@ -491,7 +500,7 @@ private:
MutexWaitingThreads wait_mutex_threads;

/// Thread that owns the lock that this thread is waiting for.
SharedPtr<Thread> lock_owner;
std::shared_ptr<Thread> lock_owner;

/// If waiting on a ConditionVariable, this is the ConditionVariable address
VAddr condvar_wait_address = 0;

@@ -524,6 +533,7 @@ private:

u32 scheduling_state = 0;
bool is_running = false;
bool is_sync_cancelled = false;

std::string name;
};
Some files were not shown because too many files have changed in this diff.