Compare commits
407 Commits
mainline-0 ... mainline-0
| Author | SHA1 | Date |
|---|---|---|
| | 7bc63c6e27 | |
| | 61bf850f3d | |
| | 1498a7c9a8 | |
| | 3a804752cb | |
| | ea4f62615e | |
| | 546af64340 | |
| | eba3c59a61 | |
| | 18175c71ed | |
| | ff3c7c068b | |
| | 6bf80dfee0 | |
| | e9446d232f | |
| | 4577dcd5f9 | |
| | 3f942c01f0 | |
| | e86a7e3691 | |
| | b0727c90c5 | |
| | 741dc13c5a | |
| | 00cb631b2f | |
| | 756365386a | |
| | b944edc85d | |
| | 31e6e58101 | |
| | 53aec1fe2d | |
| | eb3afd30b1 | |
| | 806e2d7900 | |
| | b331bb5210 | |
| | 8019b2b9b5 | |
| | 9a9e81f2e9 | |
| | f30ef98761 | |
| | cde532cc52 | |
| | c1b81f776c | |
| | b0c9752663 | |
| | 2c6e940493 | |
| | 48d040fded | |
| | e5a76d728f | |
| | 82d232db46 | |
| | 13b08376b7 | |
| | 9137f3ec68 | |
| | fc43eac82a | |
| | d4ebc9a120 | |
| | 64c3582705 | |
| | 9e4b2d60bc | |
| | 5e49b81d4d | |
| | 821fc4a7b6 | |
| | f317b0d354 | |
| | f614d7d887 | |
| | 9fc7f60b94 | |
| | 67d08f14af | |
| | 2489547dc5 | |
| | 1498ece290 | |
| | f7dc77e03a | |
| | 8c7d89e6c7 | |
| | 7d9465d47a | |
| | 2394807b42 | |
| | 4bf1cf1f81 | |
| | 70a2065828 | |
| | ec51f8de82 | |
| | 9f6b35e61f | |
| | ded094a340 | |
| | 1cc009a996 | |
| | 9914db8daa | |
| | 94f660d1cb | |
| | 6cc769065d | |
| | 360ea64738 | |
| | ad146eca60 | |
| | 99dcf7da21 | |
| | ee333e063d | |
| | 56742c6222 | |
| | 7791cfd960 | |
| | 3bf62c7a8a | |
| | 3be1a565f8 | |
| | a4526c4e1a | |
| | 5861bacafd | |
| | f67cfebada | |
| | aaf834ffa6 | |
| | 9150b8972e | |
| | 650734cc3e | |
| | 15d3376399 | |
| | ef8b3623f5 | |
| | b786568c5a | |
| | 543e212554 | |
| | 8d1afcb90f | |
| | c8fe8247ee | |
| | e24c6dab93 | |
| | 10738839ad | |
| | 3856564727 | |
| | ff46ef7ea3 | |
| | 6ee8340a6b | |
| | 055194d2ab | |
| | 091e9e8c41 | |
| | 6e953f7f02 | |
| | 37f74d8741 | |
| | f6b10fad63 | |
| | 0a1449e04b | |
| | 89a5ae92bd | |
| | ca78f77827 | |
| | cdd14b03e5 | |
| | 1470338458 | |
| | 1772ebeb1e | |
| | 1f99f5473c | |
| | c0f5830323 | |
| | bb966d3e33 | |
| | ff186b2498 | |
| | 33b4930280 | |
| | 5a4fc4a529 | |
| | 97129bc742 | |
| | 2fb77adb9f | |
| | 4dbf3f4880 | |
| | 1e55498110 | |
| | 0530292b97 | |
| | 4782985013 | |
| | eea346ba8e | |
| | 9a4e148f9e | |
| | c0d3aef28c | |
| | 8ba0cac71c | |
| | df41e78205 | |
| | 7b8fa78c65 | |
| | c21ce728c2 | |
| | 16818e952c | |
| | 9f6290d207 | |
| | 0e125dfd43 | |
| | 4921ba05db | |
| | ae6b3bdfbf | |
| | 008afa5d59 | |
| | 659b5f8088 | |
| | 9e88ad8da9 | |
| | 45b13c3037 | |
| | ef6cc3aa1d | |
| | 28b822fe38 | |
| | d4d39aa4c7 | |
| | fb0fe3b8c3 | |
| | 2350b76a91 | |
| | 09b6f03592 | |
| | 72c1cb85f1 | |
| | 64a5548454 | |
| | 81a037df9d | |
| | 2c57f0fbd5 | |
| | 04e9486651 | |
| | 2a2ee62cfd | |
| | 62766b1326 | |
| | 5dc021d15b | |
| | 34c3ec2f8c | |
| | 45e117b043 | |
| | 9dc4a80b17 | |
| | df0d8c45d2 | |
| | b769b1be26 | |
| | 44c5ea3639 | |
| | 6b00443bc1 | |
| | 8959f3521f | |
| | 6a0143400f | |
| | 748551dafb | |
| | 19c14589d3 | |
| | a8245cf2f1 | |
| | 2afc1060ef | |
| | 5882cc0502 | |
| | 04dcada85f | |
| | b2b95e96c1 | |
| | f81c783b5b | |
| | cc4335a9c6 | |
| | f7ac4e1eb4 | |
| | 1b76e7e890 | |
| | 80a673a27f | |
| | ad48259d7e | |
| | 3b4da2d7fa | |
| | e9bb95ae16 | |
| | 9477d23d70 | |
| | bfd2bcb068 | |
| | 5942d206c2 | |
| | 822edff5bd | |
| | 3b0458a7a5 | |
| | 65f821850e | |
| | df42100320 | |
| | 966896daad | |
| | 625a011888 | |
| | 12355cbf02 | |
| | 37ef2ee595 | |
| | 302a5f00e8 | |
| | 981d8e82d2 | |
| | bda177ef40 | |
| | a175ba1089 | |
| | 1e9b1d439f | |
| | 436457b6e7 | |
| | 2c4c7aea8a | |
| | b7febb5625 | |
| | 0e9a6759f9 | |
| | dd790abab0 | |
| | 46dda01151 | |
| | 6ff2db181f | |
| | a1335d3d51 | |
| | ffbde909c8 | |
| | f83ef80ebd | |
| | 51512d01d8 | |
| | d98b0f8f48 | |
| | c795207fb2 | |
| | 5b8bc56e65 | |
| | dc18a1261c | |
| | dca2e2c8f1 | |
| | 83f8c1a25e | |
| | 4cd8b2f1f7 | |
| | 2d33b2c55a | |
| | 2ef4591e58 | |
| | f1b58f0cd9 | |
| | dd0679d710 | |
| | 4a67a5b917 | |
| | e7c1d7bf77 | |
| | bf9f737c60 | |
| | fb796843df | |
| | 0bd8cecc94 | |
| | e8401964b4 | |
| | 132f2006af | |
| | e1ecf64701 | |
| | 5f4e7c77bd | |
| | 40acc2c079 | |
| | c61b973968 | |
| | 0e0fc07135 | |
| | fd873fd369 | |
| | ca9afa3293 | |
| | ff2b7cc0d3 | |
| | 3c8f936b31 | |
| | a7fd61fcce | |
| | 8def504d73 | |
| | c17ee0da5d | |
| | c3c7603076 | |
| | 8be9e5b48b | |
| | 5f517e3e16 | |
| | f8650a9580 | |
| | 3ff978aa4f | |
| | 301e2b5b7a | |
| | 432f045dba | |
| | 8f22f5470c | |
| | 72541af3bc | |
| | fade63b58e | |
| | c2b550987b | |
| | e996f1ad09 | |
| | f728a504aa | |
| | b483f2d010 | |
| | 8495e1bd83 | |
| | d8df9a16bd | |
| | 390ee10eef | |
| | d583e01f54 | |
| | 9754a8145c | |
| | 5b9aedfc21 | |
| | 8620de6b20 | |
| | 89c15dd115 | |
| | fe494a0ccd | |
| | 91084d9396 | |
| | c8bf0caca0 | |
| | 6676687694 | |
| | 95fa57f007 | |
| | 7f37822c74 | |
| | fb99446f24 | |
| | cc2c3e447f | |
| | 28e78d81b2 | |
| | 185388f341 | |
| | 76b465f3ef | |
| | af540b0057 | |
| | 06e0506cb3 | |
| | 71264ce9a7 | |
| | 6dc1d48fd1 | |
| | 3e03391a49 | |
| | be8fd5490e | |
| | ba2ea7eeac | |
| | 22be115eb2 | |
| | 0ec71b78fb | |
| | 93f7719eed | |
| | 4038ca2e5d | |
| | e11e1dcf2d | |
| | f1e278c30f | |
| | 980973d83e | |
| | 45aee996c1 | |
| | a2952ac213 | |
| | 5e35c69f35 | |
| | 2c2ef9252f | |
| | 06cf705501 | |
| | 0d7de7c2db | |
| | baff865d7c | |
| | d9a15a935b | |
| | 7bd603061c | |
| | c320da3f63 | |
| | a4bfae1b55 | |
| | 2b98da2ed4 | |
| | 0fb19e9bef | |
| | de1a316369 | |
| | b15e1a3501 | |
| | 197b5d19bc | |
| | 99d2d77062 | |
| | 703c57a119 | |
| | eb3cb54aa5 | |
| | 03dfc8d8e7 | |
| | 81c1bfafea | |
| | 6b2f653143 | |
| | 354130cd84 | |
| | 82f6037ec2 | |
| | 912dd50146 | |
| | 952d1ac487 | |
| | b4e6d6c385 | |
| | 1212fa60b6 | |
| | 8a155c4058 | |
| | 92d5c63f01 | |
| | f12701b303 | |
| | d1309fb275 | |
| | c3c43e32fc | |
| | 7420a717e6 | |
| | 4bbf173fc1 | |
| | fb43b8efd2 | |
| | 35c3c078e3 | |
| | 1ae883435d | |
| | 8fc6e92ef1 | |
| | 7a3c884e39 | |
| | 46cd71d1c7 | |
| | 5e161b2531 | |
| | 32df83e55d | |
| | 05f58144c9 | |
| | fe9588f4a0 | |
| | 25724898d0 | |
| | e07540264d | |
| | 0f932d30f5 | |
| | 64a24f3344 | |
| | 4aae21e1e4 | |
| | c190586597 | |
| | 7bad1974a6 | |
| | d7128845c9 | |
| | c68d0dc851 | |
| | b4451c5e81 | |
| | 613b3671b7 | |
| | 8eea7c1176 | |
| | 385a4555d5 | |
| | 61f707d708 | |
| | 154a7653f9 | |
| | c72571055b | |
| | 78be397723 | |
| | aaf9e39f56 | |
| | 16392a23cc | |
| | 06cef3355e | |
| | 2c27127d04 | |
| | bcb702fa3e | |
| | 21199cb965 | |
| | 123568ef80 | |
| | aace20afc7 | |
| | 0d24b1a31b | |
| | 5b60899fbc | |
| | e8d40559d5 | |
| | e112d0a52f | |
| | dc02b03c4a | |
| | 275b96a0e2 | |
| | 43d9f417ae | |
| | 4f13e270c8 | |
| | 2a6e6306d8 | |
| | 4e6aa1cfdd | |
| | 1ccf805367 | |
| | ace8a8e86e | |
| | 6b354ccaee | |
| | ac1e4734c2 | |
| | d235cf3933 | |
| | 3753553b6a | |
| | 4801f4250d | |
| | 7d904fef2e | |
| | 3a49c1a691 | |
| | beb951770a | |
| | fd5776aac2 | |
| | 87a8925523 | |
| | 974d731926 | |
| | 7265e80c12 | |
| | 86592b274e | |
| | 71e18dddbe | |
| | f64456c7e2 | |
| | ec58aabb26 | |
| | c90268127b | |
| | bf8bd60ab3 | |
| | f478a57737 | |
| | 235b5d27ae | |
| | beaa25d777 | |
| | 8a5356357f | |
| | 62f67df6d7 | |
| | 55fb8e7bdd | |
| | 57c9da1b39 | |
| | a745d87971 | |
| | 53d92318b8 | |
| | d4f871cb6a | |
| | 1ff341f3dc | |
| | 0d47c1d527 | |
| | 9e109849ff | |
| | 904ac1daec | |
| | 6d30745d77 | |
| | eb318ffffc | |
| | 0bddb794b0 | |
| | 5dfb8743cb | |
| | 8c27a74132 | |
| | 803ac4ca59 | |
| | cdbee27692 | |
| | 7344a7c447 | |
| | f687392e6f | |
| | 53ea06dc17 | |
| | 085adfea00 | |
| | 11f0f7598d | |
| | dce8720780 | |
| | 47843b4f09 | |
| | 25f88d99ce | |
| | d1435009ed | |
| | d937421422 | |
| | 43cad754d5 | |
| | b3587102d1 | |
| | 368b3ee227 | |
| | 661483f313 | |
| | ff3aa5d380 | |
| | bc69cc1511 | |
| | 24c1bb3842 | |
| | a19dc3bf00 | |
| | d53b79ff5c | |
@@ -15,5 +15,5 @@ mv "${REV_NAME}-source.tar.xz" $RELEASE_NAME
 7z a "$REV_NAME.7z" $RELEASE_NAME
 
 # move the compiled archive into the artifacts directory to be uploaded by travis releases
-mv "$ARCHIVE_NAME" artifacts/
-mv "$REV_NAME.7z" artifacts/
+mv "$ARCHIVE_NAME" "${ARTIFACTS_DIR}/"
+mv "$REV_NAME.7z" "${ARTIFACTS_DIR}/"
@@ -2,5 +2,6 @@
 
 GITDATE="`git show -s --date=short --format='%ad' | sed 's/-//g'`"
 GITREV="`git show -s --format='%h'`"
+ARTIFACTS_DIR="artifacts"
 
-mkdir -p artifacts
+mkdir -p "${ARTIFACTS_DIR}/"
@@ -1,14 +1,49 @@
 #!/bin/bash -ex
 
+# Exit on error, rather than continuing with the rest of the script.
+set -e
+
 cd /yuzu
 
 ccache -s
 
 mkdir build || true && cd build
-cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON
+cmake .. -DDISPLAY_VERSION=$1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DCMAKE_INSTALL_PREFIX="/usr"
 
-ninja
+make -j$(nproc)
 
 ccache -s
 
 ctest -VV -C Release
+
+make install DESTDIR=AppDir
+rm -vf AppDir/usr/bin/yuzu-cmd AppDir/usr/bin/yuzu-tester
+
+# Download tools needed to build an AppImage
+wget -nc https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage
+wget -nc https://github.com/linuxdeploy/linuxdeploy-plugin-qt/releases/download/continuous/linuxdeploy-plugin-qt-x86_64.AppImage
+wget -nc https://github.com/darealshinji/AppImageKit-checkrt/releases/download/continuous/AppRun-patched-x86_64
+wget -nc https://github.com/darealshinji/AppImageKit-checkrt/releases/download/continuous/exec-x86_64.so
+# Set executable bit
+chmod 755 \
+    AppRun-patched-x86_64 \
+    exec-x86_64.so \
+    linuxdeploy-x86_64.AppImage \
+    linuxdeploy-plugin-qt-x86_64.AppImage
+
+# Workaround for https://github.com/AppImage/AppImageKit/issues/828
+export APPIMAGE_EXTRACT_AND_RUN=1
+
+mkdir -p AppDir/usr/optional
+mkdir -p AppDir/usr/optional/libstdc++
+mkdir -p AppDir/usr/optional/libgcc_s
+
+# Deploy yuzu's needed dependencies
+./linuxdeploy-x86_64.AppImage --appdir AppDir --plugin qt
+
+# Workaround for building yuzu with GCC 10 but also trying to distribute it to Ubuntu 18.04 et al.
+# See https://github.com/darealshinji/AppImageKit-checkrt
+cp exec-x86_64.so AppDir/usr/optional/exec.so
+cp AppRun-patched-x86_64 AppDir/AppRun
+cp --dereference /usr/lib/x86_64-linux-gnu/libstdc++.so.6 AppDir/usr/optional/libstdc++/libstdc++.so.6
+cp --dereference /lib/x86_64-linux-gnu/libgcc_s.so.1 AppDir/usr/optional/libgcc_s/libgcc_s.so.1
@@ -2,6 +2,7 @@
 
 . .ci/scripts/common/pre-upload.sh
 
+APPIMAGE_NAME="yuzu-${GITDATE}-${GITREV}.AppImage"
 REV_NAME="yuzu-linux-${GITDATE}-${GITREV}"
 ARCHIVE_NAME="${REV_NAME}.tar.xz"
 COMPRESSION_FLAGS="-cJvf"

@@ -17,4 +18,24 @@ mkdir "$DIR_NAME"
 cp build/bin/yuzu-cmd "$DIR_NAME"
 cp build/bin/yuzu "$DIR_NAME"
 
+# Build an AppImage
+cd build
+
+wget -nc https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
+chmod 755 appimagetool-x86_64.AppImage
+
+if [ "${RELEASE_NAME}" = "mainline" ]; then
+    # Generate update information if releasing to mainline
+    ./appimagetool-x86_64.AppImage -u "gh-releases-zsync|yuzu-emu|yuzu-${RELEASE_NAME}|latest|yuzu-*.AppImage.zsync" AppDir "${APPIMAGE_NAME}"
+else
+    ./appimagetool-x86_64.AppImage AppDir "${APPIMAGE_NAME}"
+fi
+cd ..
+
+# Copy the AppImage and update info to the artifacts directory and avoid compressing it
+cp "build/${APPIMAGE_NAME}" "${ARTIFACTS_DIR}/"
+if [ -f "build/${APPIMAGE_NAME}.zsync" ]; then
+    cp "build/${APPIMAGE_NAME}.zsync" "${ARTIFACTS_DIR}/"
+fi
+
 . .ci/scripts/common/post-upload.sh
@@ -8,7 +8,7 @@ steps:
     displayName: 'Install vulkan-sdk'
  - script: python -m pip install --upgrade pip conan
    displayName: 'Install conan'
- - script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 --config Release -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DDISPLAY_VERSION=${{ parameters['version'] }} .. && cd ..
+ - script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 --config Release -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} .. && cd ..
   displayName: 'Configure CMake'
 - task: MSBuild@1
   displayName: 'Build'
@@ -26,6 +26,10 @@ option(ENABLE_CUBEB "Enables the cubeb audio backend" ON)
 
 option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)
 
+if (NOT ENABLE_WEB_SERVICE)
+    set(YUZU_ENABLE_BOXCAT OFF)
+endif()
+
 # Default to a Release build
 get_property(IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
 if (NOT IS_MULTI_CONFIG AND NOT CMAKE_BUILD_TYPE)

@@ -165,7 +169,7 @@ macro(yuzu_find_packages)
         "lz4 1.8 lz4/1.9.2"
         "nlohmann_json 3.8 nlohmann_json/3.8.0"
         "ZLIB 1.2 zlib/1.2.11"
-        "zstd 1.4 zstd/1.4.5"
+        "zstd 1.4 zstd/1.4.8"
     )
 
     foreach(PACKAGE ${REQUIRED_LIBS})

@@ -239,7 +243,7 @@ if(ENABLE_QT)
     if (YUZU_USE_QT_WEB_ENGINE)
         find_package(Qt5 COMPONENTS WebEngineCore WebEngineWidgets)
     endif()
 
 
     if (ENABLE_QT_TRANSLATION)
         find_package(Qt5 REQUIRED COMPONENTS LinguistTools ${QT_PREFIX_HINT})
     endif()

@@ -257,7 +261,7 @@ if(ENABLE_SDL2)
     find_package(SDL2)
     if (NOT SDL2_FOUND)
         # otherwise add this to the list of libraries to install
-        list(APPEND CONAN_REQUIRED_LIBS "sdl2/2.0.12@bincrafters/stable")
+        list(APPEND CONAN_REQUIRED_LIBS "sdl2/2.0.14@bincrafters/stable")
     endif()
 endif()

@@ -322,7 +326,7 @@ if (CONAN_REQUIRED_LIBS)
         list(APPEND Boost_LIBRARIES Boost::context)
     endif()
 endif()
 
 
 # Due to issues with variable scopes in functions, we need to also find_package(qt5) outside of the function
 if(ENABLE_QT)
     list(APPEND CMAKE_MODULE_PATH "${CONAN_QT_ROOT_RELEASE}")
@@ -30,7 +30,6 @@ If you want to contribute to the user interface translation, please check out th
 
 * __Windows__: [Windows Build](https://github.com/yuzu-emu/yuzu/wiki/Building-For-Windows)
 * __Linux__: [Linux Build](https://github.com/yuzu-emu/yuzu/wiki/Building-For-Linux)
-* __macOS__: [macOS Build](https://github.com/yuzu-emu/yuzu/wiki/Building-for-macOS)
 
 
 ### Support
Submodule externals/dynarmic updated: 4a9a0d07f7...8c09da666a
@@ -45,10 +45,15 @@ if (MSVC)
 
         # Warnings
         /W3
+        /we4062 # enumerator 'identifier' in a switch of enum 'enumeration' is not handled
+        /we4101 # 'identifier': unreferenced local variable
+        /we4265 # 'class': class has virtual functions, but destructor is not virtual
+        /we4388 # signed/unsigned mismatch
         /we4547 # 'operator' : operator before comma has no effect; expected operator with side-effect
         /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
         /we4555 # Expression has no effect; expected expression with side-effect
         /we4834 # Discarding return value of function with 'nodiscard' attribute
+        /we5038 # data member 'member1' will be initialized after data member 'member2'
     )
 
     # /GS- - No stack buffer overflow checks

@@ -59,11 +64,16 @@ if (MSVC)
 else()
     add_compile_options(
         -Wall
+        -Werror=array-bounds
         -Werror=implicit-fallthrough
         -Werror=missing-declarations
+        -Werror=missing-field-initializers
         -Werror=reorder
         -Werror=switch
+        -Werror=uninitialized
+        -Werror=unused-function
         -Werror=unused-result
+        -Werror=unused-variable
         -Wextra
         -Wmissing-declarations
         -Wno-attributes

@@ -122,7 +132,6 @@ add_subdirectory(tests)
 
 if (ENABLE_SDL2)
     add_subdirectory(yuzu_cmd)
-    add_subdirectory(yuzu_tester)
 endif()
 
 if (ENABLE_QT)
@@ -383,11 +383,14 @@ void CommandGenerator::GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, E
     const auto channel_count = params.channel_count;
     for (s32 i = 0; i < channel_count; i++) {
         // TODO(ogniK): Actually implement reverb
+        /*
         if (params.input[i] != params.output[i]) {
             const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
             auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
             ApplyMix<1>(output, input, 32768, worker_params.sample_count);
         }
+        }*/
+        auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
+        std::memset(output, 0, worker_params.sample_count * sizeof(s32));
     }
 }
@@ -40,17 +40,17 @@ public:
         SinkSampleFormat sample_format;
         std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
         bool in_use;
-        INSERT_UNION_PADDING_BYTES(5);
+        INSERT_PADDING_BYTES_NOINIT(5);
     };
     static_assert(sizeof(CircularBufferIn) == 0x28,
                   "SinkInfo::CircularBufferIn is in invalid size");
 
     struct DeviceIn {
         std::array<u8, 255> device_name;
-        INSERT_UNION_PADDING_BYTES(1);
+        INSERT_PADDING_BYTES_NOINIT(1);
         s32_le input_count;
         std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
-        INSERT_UNION_PADDING_BYTES(1);
+        INSERT_PADDING_BYTES_NOINIT(1);
         bool down_matrix_enabled;
         DownmixCoefficients down_matrix_coef;
     };
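A minimal sketch (not part of the PR) of why the `_NOINIT` padding variant matters: leaving the padding bytes without a default initializer keeps the containing struct trivially constructible, which these audio structs rely on. `CONCAT2` and the macro are reproduced locally, and `DeviceInLike` is a hypothetical stand-in, so the snippet is self-contained.

```cpp
#include <array>
#include <cstdint>
#include <type_traits>

// Reproduced helpers (assumptions: mirrors of the macros shown in the diff).
#define DO_CONCAT2(x, y) x##y
#define CONCAT2(x, y) DO_CONCAT2(x, y)
#define INSERT_PADDING_BYTES_NOINIT(num_bytes) \
    std::array<std::uint8_t, num_bytes> CONCAT2(pad, __LINE__)

struct DeviceInLike {
    std::array<std::uint8_t, 255> device_name;
    INSERT_PADDING_BYTES_NOINIT(1); // no initializer -> stays trivial
    std::int32_t input_count;
};

// With no default member initializers, the type is trivially constructible.
static_assert(std::is_trivially_constructible_v<DeviceInLike>);

int main() {
    return 0;
}
```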
@@ -51,6 +51,14 @@ void Stream::Stop() {
     UNIMPLEMENTED();
 }
 
+bool Stream::Flush() {
+    const bool had_buffers = !queued_buffers.empty();
+    while (!queued_buffers.empty()) {
+        queued_buffers.pop();
+    }
+    return had_buffers;
+}
+
 void Stream::SetVolume(float volume) {
     game_volume = volume;
 }
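The contract of the new `Flush()` in isolation: drain the queue and report whether anything was actually dropped. A hedged, standalone sketch — names mirror the diff, but the surrounding `Stream` class is not reproduced.

```cpp
#include <cassert>
#include <memory>
#include <queue>

struct Buffer {};
using BufferPtr = std::shared_ptr<Buffer>;

std::queue<BufferPtr> queued_buffers;

// Same logic as the method added above: empty the queue, return whether
// there was anything to discard.
bool Flush() {
    const bool had_buffers = !queued_buffers.empty();
    while (!queued_buffers.empty()) {
        queued_buffers.pop();
    }
    return had_buffers;
}

int main() {
    assert(!Flush()); // nothing queued yet
    queued_buffers.push(std::make_shared<Buffer>());
    assert(Flush());  // reports that buffers were dropped
    assert(queued_buffers.empty());
}
```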
@@ -56,6 +56,9 @@ public:
    /// Queues a buffer into the audio stream, returns true on success
    bool QueueBuffer(BufferPtr&& buffer);
 
+    /// Flush audio buffers
+    bool Flush();
+
    /// Returns true if the audio stream contains a buffer with the specified tag
    [[nodiscard]] bool ContainsBuffer(Buffer::Tag tag) const;
 
@@ -86,28 +86,28 @@ struct BehaviorFlags {
 static_assert(sizeof(BehaviorFlags) == 0x4, "BehaviorFlags is an invalid size");
 
 struct ADPCMContext {
-    u16 header{};
-    s16 yn1{};
-    s16 yn2{};
+    u16 header;
+    s16 yn1;
+    s16 yn2;
 };
 static_assert(sizeof(ADPCMContext) == 0x6, "ADPCMContext is an invalid size");
 
 struct VoiceState {
-    s64 played_sample_count{};
-    s32 offset{};
-    s32 wave_buffer_index{};
-    std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid{};
-    s32 wave_buffer_consumed{};
-    std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history{};
-    s32 fraction{};
-    VAddr context_address{};
-    Codec::ADPCM_Coeff coeff{};
-    ADPCMContext context{};
-    std::array<s64, 2> biquad_filter_state{};
-    std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples{};
-    u32 external_context_size{};
-    bool is_external_context_used{};
-    bool voice_dropped{};
+    s64 played_sample_count;
+    s32 offset;
+    s32 wave_buffer_index;
+    std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid;
+    s32 wave_buffer_consumed;
+    std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history;
+    s32 fraction;
+    VAddr context_address;
+    Codec::ADPCM_Coeff coeff;
+    ADPCMContext context;
+    std::array<s64, 2> biquad_filter_state;
+    std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples;
+    u32 external_context_size;
+    bool is_external_context_used;
+    bool voice_dropped;
 };
 
 class VoiceChannelResource {
@@ -98,7 +98,6 @@ add_library(common STATIC
     algorithm.h
     alignment.h
     assert.h
-    atomic_ops.cpp
     atomic_ops.h
     detached_tasks.cpp
     detached_tasks.h

@@ -108,7 +107,6 @@ add_library(common STATIC
     bit_util.h
     cityhash.cpp
     cityhash.h
-    color.h
     common_funcs.h
     common_paths.h
     common_types.h

@@ -123,6 +121,7 @@ add_library(common STATIC
     hash.h
     hex_util.cpp
     hex_util.h
+    intrusive_red_black_tree.h
     logging/backend.cpp
     logging/backend.h
     logging/filter.cpp

@@ -135,16 +134,17 @@ add_library(common STATIC
     math_util.h
     memory_detect.cpp
     memory_detect.h
-    memory_hook.cpp
-    memory_hook.h
     microprofile.cpp
     microprofile.h
     microprofileui.h
     misc.cpp
+    nvidia_flags.cpp
+    nvidia_flags.h
     page_table.cpp
     page_table.h
     param_package.cpp
     param_package.h
+    parent_of_member.h
     quaternion.h
     ring_buffer.h
     scm_rev.cpp

@@ -167,8 +167,7 @@ add_library(common STATIC
     threadsafe_queue.h
     time_zone.cpp
     time_zone.h
-    timer.cpp
-    timer.h
+    tree.h
     uint128.cpp
     uint128.h
     uuid.cpp
@@ -9,50 +9,45 @@
 namespace Common {
 
 template <typename T>
-[[nodiscard]] constexpr T AlignUp(T value, std::size_t size) {
-    static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
+requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignUp(T value, size_t size) {
     auto mod{static_cast<T>(value % size)};
     value -= mod;
     return static_cast<T>(mod == T{0} ? value : value + size);
 }
 
 template <typename T>
-[[nodiscard]] constexpr T AlignDown(T value, std::size_t size) {
-    static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
+requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignUpLog2(T value, size_t align_log2) {
+    return static_cast<T>((value + ((1ULL << align_log2) - 1)) >> align_log2 << align_log2);
+}
+
+template <typename T>
+requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignDown(T value, size_t size) {
     return static_cast<T>(value - value % size);
 }
 
 template <typename T>
-[[nodiscard]] constexpr T AlignBits(T value, std::size_t align) {
-    static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
-    return static_cast<T>((value + ((1ULL << align) - 1)) >> align << align);
-}
-
-template <typename T>
-[[nodiscard]] constexpr bool Is4KBAligned(T value) {
-    static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
+requires std::is_unsigned_v<T>[[nodiscard]] constexpr bool Is4KBAligned(T value) {
     return (value & 0xFFF) == 0;
 }
 
 template <typename T>
-[[nodiscard]] constexpr bool IsWordAligned(T value) {
-    static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
+requires std::is_unsigned_v<T>[[nodiscard]] constexpr bool IsWordAligned(T value) {
     return (value & 0b11) == 0;
 }
 
 template <typename T>
-[[nodiscard]] constexpr bool IsAligned(T value, std::size_t alignment) {
-    using U = typename std::make_unsigned<T>::type;
+requires std::is_integral_v<T>[[nodiscard]] constexpr bool IsAligned(T value, size_t alignment) {
+    using U = typename std::make_unsigned_t<T>;
     const U mask = static_cast<U>(alignment - 1);
     return (value & mask) == 0;
 }
 
-template <typename T, std::size_t Align = 16>
+template <typename T, size_t Align = 16>
 class AlignmentAllocator {
 public:
     using value_type = T;
-    using size_type = std::size_t;
-    using difference_type = std::ptrdiff_t;
+    using size_type = size_t;
+    using difference_type = ptrdiff_t;
 
     using propagate_on_container_copy_assignment = std::true_type;
     using propagate_on_container_move_assignment = std::true_type;
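A worked example (a sketch, not from the PR): the alignment helpers above are plain modular arithmetic, so their behavior can be checked at compile time. The functions are re-implemented locally, without the `requires` clauses, to keep the checks self-contained.

```cpp
#include <cstddef>

// Round up to the next multiple of size (no-op when already aligned).
template <typename T>
constexpr T AlignUp(T value, std::size_t size) {
    const auto mod = static_cast<T>(value % size);
    value -= mod;
    return static_cast<T>(mod == T{0} ? value : value + size);
}

// Round down to the previous multiple of size.
template <typename T>
constexpr T AlignDown(T value, std::size_t size) {
    return static_cast<T>(value - value % size);
}

static_assert(AlignUp(13u, 8) == 16u);
static_assert(AlignUp(16u, 8) == 16u);   // already aligned values are unchanged
static_assert(AlignDown(13u, 8) == 8u);
static_assert(AlignDown(4096u, 4096) == 4096u);

int main() {
    return 0;
}
```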
@@ -29,22 +29,19 @@ assert_noinline_call(const Fn& fn) {
 }
 
 #define ASSERT(_a_) \
-    do \
-        if (!(_a_)) { \
-            assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
-        } \
-    while (0)
+    if (!(_a_)) { \
+        LOG_CRITICAL(Debug, "Assertion Failed!"); \
+    }
 
 #define ASSERT_MSG(_a_, ...) \
-    do \
-        if (!(_a_)) { \
-            assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
-        } \
-    while (0)
+    if (!(_a_)) { \
+        LOG_CRITICAL(Debug, "Assertion Failed! " __VA_ARGS__); \
+    }
 
-#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
+#define UNREACHABLE() \
+    { LOG_CRITICAL(Debug, "Unreachable code!"); }
 #define UNREACHABLE_MSG(...) \
-    assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
+    { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); }
 
 #ifdef _DEBUG
 #define DEBUG_ASSERT(_a_) ASSERT(_a_)
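One observation worth a sketch: the removed `do { ... } while (0)` wrapper is the classic guard against dangling-else, and the new bare-`if` form reintroduces that hazard when the macro is used without braces. A hedged illustration (hypothetical `ASSERT_IF_STYLE` name, `LOG_CRITICAL` stubbed):

```cpp
#include <cstdio>

#define LOG_CRITICAL() std::puts("Assertion Failed!")

// Same shape as the new ASSERT above.
#define ASSERT_IF_STYLE(_a_) \
    if (!(_a_)) {            \
        LOG_CRITICAL();      \
    }

void Check(bool ok) {
    if (ok)
        ASSERT_IF_STYLE(ok) // the 'else' below binds to the macro's inner 'if'
    else
        std::puts("not ok");
}

int main() {
    Check(true);  // surprisingly prints "not ok": the else captured the macro's if
    Check(false); // prints nothing
}
```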
@@ -1,75 +0,0 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <cstring>
-
-#include "common/atomic_ops.h"
-
-#if _MSC_VER
-#include <intrin.h>
-#endif
-
-namespace Common {
-
-#if _MSC_VER
-
-bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
-    const u8 result =
-        _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected);
-    return result == expected;
-}
-
-bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
-    const u16 result =
-        _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected);
-    return result == expected;
-}
-
-bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
-    const u32 result =
-        _InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected);
-    return result == expected;
-}
-
-bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
-    const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer),
-                                                     value, expected);
-    return result == expected;
-}
-
-bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
-    return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1],
-                                          value[0],
-                                          reinterpret_cast<__int64*>(expected.data())) != 0;
-}
-
-#else
-
-bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
-    return __sync_bool_compare_and_swap(pointer, expected, value);
-}
-
-bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
-    return __sync_bool_compare_and_swap(pointer, expected, value);
-}
-
-bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
-    return __sync_bool_compare_and_swap(pointer, expected, value);
-}
-
-bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
-    return __sync_bool_compare_and_swap(pointer, expected, value);
-}
-
-bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
-    unsigned __int128 value_a;
-    unsigned __int128 expected_a;
-    std::memcpy(&value_a, value.data(), sizeof(u128));
-    std::memcpy(&expected_a, expected.data(), sizeof(u128));
-    return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
-}
-
-#endif
-
-} // namespace Common
@@ -4,14 +4,75 @@
 
 #pragma once
 
+#include <cstring>
+#include <memory>
+
 #include "common/common_types.h"
 
+#if _MSC_VER
+#include <intrin.h>
+#endif
+
 namespace Common {
 
-[[nodiscard]] bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected);
-[[nodiscard]] bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected);
-[[nodiscard]] bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected);
-[[nodiscard]] bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected);
-[[nodiscard]] bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected);
+#if _MSC_VER
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
+    const u8 result =
+        _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected);
+    return result == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
+    const u16 result =
+        _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected);
+    return result == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
+    const u32 result =
+        _InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected);
+    return result == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
+    const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer),
+                                                     value, expected);
+    return result == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
+    return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1],
+                                          value[0],
+                                          reinterpret_cast<__int64*>(expected.data())) != 0;
+}
+
+#else
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
+    return __sync_bool_compare_and_swap(pointer, expected, value);
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
+    return __sync_bool_compare_and_swap(pointer, expected, value);
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
+    return __sync_bool_compare_and_swap(pointer, expected, value);
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
+    return __sync_bool_compare_and_swap(pointer, expected, value);
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
+    unsigned __int128 value_a;
+    unsigned __int128 expected_a;
+    std::memcpy(&value_a, value.data(), sizeof(u128));
+    std::memcpy(&expected_a, expected.data(), sizeof(u128));
+    return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
+}
+
+#endif
 
 } // namespace Common
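A usage sketch for the header-inlined CAS above, using the non-MSVC path (assumes a GCC/Clang toolchain for the `__sync` builtin). It mirrors the `AtomicCompareAndSwap(pointer, value, expected)` argument order used in the diff; nothing here beyond the one overload is taken from the source.

```cpp
#include <cassert>
#include <cstdint>

using u32 = std::uint32_t;

// Non-MSVC overload as in the header: store value if *pointer == expected.
inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
    return __sync_bool_compare_and_swap(pointer, expected, value);
}

int main() {
    volatile u32 word = 5;
    assert(AtomicCompareAndSwap(&word, 7, 5));  // 5 matches expected, store 7
    assert(word == 7);
    assert(!AtomicCompareAndSwap(&word, 9, 5)); // fails: word is now 7
    assert(word == 7);
}
```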
@@ -4,13 +4,10 @@
 
 #pragma once
 
+#include <bit>
 #include <climits>
 #include <cstddef>
 
-#ifdef _MSC_VER
-#include <intrin.h>
-#endif
-
 #include "common/common_types.h"
 
 namespace Common {

@@ -21,124 +18,30 @@ template <typename T>
     return sizeof(T) * CHAR_BIT;
 }
 
-#ifdef _MSC_VER
-[[nodiscard]] inline u32 CountLeadingZeroes32(u32 value) {
-    unsigned long leading_zero = 0;
-
-    if (_BitScanReverse(&leading_zero, value) != 0) {
-        return 31 - leading_zero;
-    }
-
-    return 32;
+[[nodiscard]] constexpr u32 MostSignificantBit32(const u32 value) {
+    return 31U - static_cast<u32>(std::countl_zero(value));
 }
 
-[[nodiscard]] inline u32 CountLeadingZeroes64(u64 value) {
-    unsigned long leading_zero = 0;
-
-    if (_BitScanReverse64(&leading_zero, value) != 0) {
-        return 63 - leading_zero;
-    }
-
-    return 64;
-}
-#else
-[[nodiscard]] inline u32 CountLeadingZeroes32(u32 value) {
-    if (value == 0) {
-        return 32;
-    }
-
-    return static_cast<u32>(__builtin_clz(value));
+[[nodiscard]] constexpr u32 MostSignificantBit64(const u64 value) {
+    return 63U - static_cast<u32>(std::countl_zero(value));
 }
 
-[[nodiscard]] inline u32 CountLeadingZeroes64(u64 value) {
-    if (value == 0) {
-        return 64;
-    }
-
-    return static_cast<u32>(__builtin_clzll(value));
-}
-#endif
-
-#ifdef _MSC_VER
-[[nodiscard]] inline u32 CountTrailingZeroes32(u32 value) {
-    unsigned long trailing_zero = 0;
-
-    if (_BitScanForward(&trailing_zero, value) != 0) {
-        return trailing_zero;
-    }
-
-    return 32;
-}
-
-[[nodiscard]] inline u32 CountTrailingZeroes64(u64 value) {
-    unsigned long trailing_zero = 0;
-
-    if (_BitScanForward64(&trailing_zero, value) != 0) {
-        return trailing_zero;
-    }
-
-    return 64;
-}
-#else
-[[nodiscard]] inline u32 CountTrailingZeroes32(u32 value) {
-    if (value == 0) {
-        return 32;
-    }
-
-    return static_cast<u32>(__builtin_ctz(value));
-}
-
-[[nodiscard]] inline u32 CountTrailingZeroes64(u64 value) {
-    if (value == 0) {
-        return 64;
-    }
-
-    return static_cast<u32>(__builtin_ctzll(value));
-}
-#endif
-
-#ifdef _MSC_VER
-
-[[nodiscard]] inline u32 MostSignificantBit32(const u32 value) {
-    unsigned long result;
-    _BitScanReverse(&result, value);
-    return static_cast<u32>(result);
-}
-
-[[nodiscard]] inline u32 MostSignificantBit64(const u64 value) {
-    unsigned long result;
-    _BitScanReverse64(&result, value);
-    return static_cast<u32>(result);
-}
-
-#else
-
-[[nodiscard]] inline u32 MostSignificantBit32(const u32 value) {
-    return 31U - static_cast<u32>(__builtin_clz(value));
-}
-
-[[nodiscard]] inline u32 MostSignificantBit64(const u64 value) {
-    return 63U - static_cast<u32>(__builtin_clzll(value));
-}
-
-#endif
-
-[[nodiscard]] inline u32 Log2Floor32(const u32 value) {
+[[nodiscard]] constexpr u32 Log2Floor32(const u32 value) {
     return MostSignificantBit32(value);
 }
 
-[[nodiscard]] inline u32 Log2Ceil32(const u32 value) {
-    const u32 log2_f = Log2Floor32(value);
-    return log2_f + ((value ^ (1U << log2_f)) != 0U);
-}
-
-[[nodiscard]] inline u32 Log2Floor64(const u64 value) {
+[[nodiscard]] constexpr u32 Log2Floor64(const u64 value) {
     return MostSignificantBit64(value);
 }
 
-[[nodiscard]] inline u32 Log2Ceil64(const u64 value) {
-    const u64 log2_f = static_cast<u64>(Log2Floor64(value));
-    return static_cast<u32>(log2_f + ((value ^ (1ULL << log2_f)) != 0ULL));
+[[nodiscard]] constexpr u32 Log2Ceil32(const u32 value) {
+    const u32 log2_f = Log2Floor32(value);
+    return log2_f + static_cast<u32>((value ^ (1U << log2_f)) != 0U);
+}
+
+[[nodiscard]] constexpr u32 Log2Ceil64(const u64 value) {
+    const u64 log2_f = Log2Floor64(value);
+    return static_cast<u32>(log2_f + static_cast<u64>((value ^ (1ULL << log2_f)) != 0ULL));
 }
 
 } // namespace Common
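A worked example (sketch): with C++20's `std::countl_zero`, the MSVC/GCC intrinsic forks above collapse into `constexpr` one-liners whose results can be verified at compile time. Re-implemented locally with standard types so the checks stand alone.

```cpp
#include <bit>
#include <cstdint>

// Bit index of the highest set bit (same formula as the new header).
constexpr std::uint32_t MostSignificantBit32(std::uint32_t value) {
    return 31U - static_cast<std::uint32_t>(std::countl_zero(value));
}

// Floor log2 is the MSB index; ceil adds 1 unless value is a power of two.
constexpr std::uint32_t Log2Ceil32(std::uint32_t value) {
    const std::uint32_t log2_f = MostSignificantBit32(value);
    return log2_f + static_cast<std::uint32_t>((value ^ (1U << log2_f)) != 0U);
}

static_assert(MostSignificantBit32(1) == 0);
static_assert(MostSignificantBit32(0x80000000u) == 31);
static_assert(Log2Ceil32(8) == 3); // exact power of two: floor == ceil
static_assert(Log2Ceil32(9) == 4); // rounds up otherwise

int main() {
    return 0;
}
```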
@@ -1,271 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <cstring>
-
-#include "common/common_types.h"
-#include "common/swap.h"
-#include "common/vector_math.h"
-
-namespace Common::Color {
-
-/// Convert a 1-bit color component to 8 bit
-[[nodiscard]] constexpr u8 Convert1To8(u8 value) {
-    return value * 255;
-}
-
-/// Convert a 4-bit color component to 8 bit
-[[nodiscard]] constexpr u8 Convert4To8(u8 value) {
-    return (value << 4) | value;
-}
-
-/// Convert a 5-bit color component to 8 bit
-[[nodiscard]] constexpr u8 Convert5To8(u8 value) {
-    return (value << 3) | (value >> 2);
-}
-
-/// Convert a 6-bit color component to 8 bit
-[[nodiscard]] constexpr u8 Convert6To8(u8 value) {
-    return (value << 2) | (value >> 4);
-}
-
-/// Convert a 8-bit color component to 1 bit
-[[nodiscard]] constexpr u8 Convert8To1(u8 value) {
-    return value >> 7;
-}
-
-/// Convert a 8-bit color component to 4 bit
-[[nodiscard]] constexpr u8 Convert8To4(u8 value) {
-    return value >> 4;
-}
-
-/// Convert a 8-bit color component to 5 bit
-[[nodiscard]] constexpr u8 Convert8To5(u8 value) {
-    return value >> 3;
-}
-
-/// Convert a 8-bit color component to 6 bit
-[[nodiscard]] constexpr u8 Convert8To6(u8 value) {
-    return value >> 2;
-}
-
-/**
- * Decode a color stored in RGBA8 format
- * @param bytes Pointer to encoded source color
- * @return Result color decoded as Common::Vec4<u8>
- */
-[[nodiscard]] inline Common::Vec4<u8> DecodeRGBA8(const u8* bytes) {
-    return {bytes[3], bytes[2], bytes[1], bytes[0]};
-}
-
-/**
- * Decode a color stored in RGB8 format
- * @param bytes Pointer to encoded source color
- * @return Result color decoded as Common::Vec4<u8>
- */
-[[nodiscard]] inline Common::Vec4<u8> DecodeRGB8(const u8* bytes) {
-    return {bytes[2], bytes[1], bytes[0], 255};
-}
-
-/**
- * Decode a color stored in RG8 (aka HILO8) format
- * @param bytes Pointer to encoded source color
- * @return Result color decoded as Common::Vec4<u8>
- */
-[[nodiscard]] inline Common::Vec4<u8> DecodeRG8(const u8* bytes) {
-    return {bytes[1], bytes[0], 0, 255};
-}
-
-/**
- * Decode a color stored in RGB565 format
- * @param bytes Pointer to encoded source color
- * @return Result color decoded as Common::Vec4<u8>
- */
-[[nodiscard]] inline Common::Vec4<u8> DecodeRGB565(const u8* bytes) {
-    u16_le pixel;
-    std::memcpy(&pixel, bytes, sizeof(pixel));
-    return {Convert5To8((pixel >> 11) & 0x1F), Convert6To8((pixel >> 5) & 0x3F),
-            Convert5To8(pixel & 0x1F), 255};
-}
-
-/**
- * Decode a color stored in RGB5A1 format
- * @param bytes Pointer to encoded source color
- * @return Result color decoded as Common::Vec4<u8>
- */
-[[nodiscard]] inline Common::Vec4<u8> DecodeRGB5A1(const u8* bytes) {
-    u16_le pixel;
-    std::memcpy(&pixel, bytes, sizeof(pixel));
-    return {Convert5To8((pixel >> 11) & 0x1F), Convert5To8((pixel >> 6) & 0x1F),
-            Convert5To8((pixel >> 1) & 0x1F), Convert1To8(pixel & 0x1)};
-}
-
-/**
- * Decode a color stored in RGBA4 format
- * @param bytes Pointer to encoded source color
- * @return Result color decoded as Common::Vec4<u8>
- */
-[[nodiscard]] inline Common::Vec4<u8> DecodeRGBA4(const u8* bytes) {
-    u16_le pixel;
-    std::memcpy(&pixel, bytes, sizeof(pixel));
-    return {Convert4To8((pixel >> 12) & 0xF), Convert4To8((pixel >> 8) & 0xF),
-            Convert4To8((pixel >> 4) & 0xF), Convert4To8(pixel & 0xF)};
-}
-
-/**
- * Decode a depth value stored in D16 format
- * @param bytes Pointer to encoded source value
- * @return Depth value as an u32
- */
-[[nodiscard]] inline u32 DecodeD16(const u8* bytes) {
-    u16_le data;
-    std::memcpy(&data, bytes, sizeof(data));
-    return data;
-}
-
-/**
- * Decode a depth value stored in D24 format
- * @param bytes Pointer to encoded source value
- * @return Depth value as an u32
- */
-[[nodiscard]] inline u32 DecodeD24(const u8* bytes) {
-    return (bytes[2] << 16) | (bytes[1] << 8) | bytes[0];
-}
-
-/**
- * Decode a depth value and a stencil value stored in D24S8 format
- * @param bytes Pointer to encoded source values
- * @return Resulting values stored as a Common::Vec2
- */
-[[nodiscard]] inline Common::Vec2<u32> DecodeD24S8(const u8* bytes) {
-    return {static_cast<u32>((bytes[2] << 16) | (bytes[1] << 8) | bytes[0]), bytes[3]};
-}
-
-/**
- * Encode a color as RGBA8 format
- * @param color Source color to encode
- * @param bytes Destination pointer to store encoded color
- */
-inline void EncodeRGBA8(const Common::Vec4<u8>& color, u8* bytes) {
-    bytes[3] = color.r();
-    bytes[2] = color.g();
-    bytes[1] = color.b();
-    bytes[0] = color.a();
-}
-
-/**
- * Encode a color as RGB8 format
- * @param color Source color to encode
- * @param bytes Destination pointer to store encoded color
- */
-inline void EncodeRGB8(const Common::Vec4<u8>& color, u8* bytes) {
-    bytes[2] = color.r();
-    bytes[1] = color.g();
-    bytes[0] = color.b();
-}
-
-/**
- * Encode a color as RG8 (aka HILO8) format
- * @param color Source color to encode
- * @param bytes Destination pointer to store encoded color
- */
-inline void EncodeRG8(const Common::Vec4<u8>& color, u8* bytes) {
-    bytes[1] = color.r();
-    bytes[0] = color.g();
-}
-/**
- * Encode a color as RGB565 format
- * @param color Source color to encode
- * @param bytes Destination pointer to store encoded color
- */
-inline void EncodeRGB565(const Common::Vec4<u8>& color, u8* bytes) {
-    const u16_le data =
-        (Convert8To5(color.r()) << 11) | (Convert8To6(color.g()) << 5) | Convert8To5(color.b());
-
-    std::memcpy(bytes, &data, sizeof(data));
-}
-
-/**
- * Encode a color as RGB5A1 format
- * @param color Source color to encode
- * @param bytes Destination pointer to store encoded color
- */
-inline void EncodeRGB5A1(const Common::Vec4<u8>& color, u8* bytes) {
-    const u16_le data = (Convert8To5(color.r()) << 11) | (Convert8To5(color.g()) << 6) |
-                        (Convert8To5(color.b()) << 1) | Convert8To1(color.a());
-
-    std::memcpy(bytes, &data, sizeof(data));
-}
-
-/**
- * Encode a color as RGBA4 format
- * @param color Source color to encode
- * @param bytes Destination pointer to store encoded color
- */
-inline void EncodeRGBA4(const Common::Vec4<u8>& color, u8* bytes) {
-    const u16 data = (Convert8To4(color.r()) << 12) | (Convert8To4(color.g()) << 8) |
-                     (Convert8To4(color.b()) << 4) | Convert8To4(color.a());
-
-    std::memcpy(bytes, &data, sizeof(data));
-}
-
-/**
- * Encode a 16 bit depth value as D16 format
- * @param value 16 bit source depth value to encode
- * @param bytes Pointer where to store the encoded value
- */
-inline void EncodeD16(u32 value, u8* bytes) {
-    const u16_le data = static_cast<u16>(value);
-    std::memcpy(bytes, &data, sizeof(data));
-}
-
-/**
- * Encode a 24 bit depth value as D24 format
- * @param value 24 bit source depth value to encode
- * @param bytes Pointer where to store the encoded value
- */
-inline void EncodeD24(u32 value, u8* bytes) {
-    bytes[0] = value & 0xFF;
-    bytes[1] = (value >> 8) & 0xFF;
-    bytes[2] = (value >> 16) & 0xFF;
-}
-
-/**
- * Encode a 24 bit depth and 8 bit stencil values as D24S8 format
- * @param depth 24 bit source depth value to encode
- * @param stencil 8 bit source stencil value to encode
- * @param bytes Pointer where to store the encoded value
- */
-inline void EncodeD24S8(u32 depth, u8 stencil, u8* bytes) {
-    bytes[0] = depth & 0xFF;
-    bytes[1] = (depth >> 8) & 0xFF;
-    bytes[2] = (depth >> 16) & 0xFF;
-    bytes[3] = stencil;
-}
-
-/**
- * Encode a 24 bit depth value as D24X8 format (32 bits per pixel with 8 bits unused)
- * @param depth 24 bit source depth value to encode
- * @param bytes Pointer where to store the encoded value
- * @note unused bits will not be modified
- */
-inline void EncodeD24X8(u32 depth, u8* bytes) {
-    bytes[0] = depth & 0xFF;
-    bytes[1] = (depth >> 8) & 0xFF;
-    bytes[2] = (depth >> 16) & 0xFF;
-}
-
-/**
- * Encode an 8 bit stencil value as X24S8 format (32 bits per pixel with 24 bits unused)
- * @param stencil 8 bit source stencil value to encode
- * @param bytes Pointer where to store the encoded value
- * @note unused bits will not be modified
- */
-inline void EncodeX24S8(u8 stencil, u8* bytes) {
-    bytes[3] = stencil;
-}
-
-} // namespace Common::Color
@@ -24,10 +24,10 @@
 #define INSERT_PADDING_WORDS(num_words) \
     std::array<u32, num_words> CONCAT2(pad, __LINE__) {}
 
-/// These are similar to the INSERT_PADDING_* macros, but are needed for padding unions. This is
-/// because unions can only be initialized by one member.
-#define INSERT_UNION_PADDING_BYTES(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__)
-#define INSERT_UNION_PADDING_WORDS(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__)
+/// These are similar to the INSERT_PADDING_* macros but do not zero-initialize the contents.
+/// This keeps the structure trivial to construct.
+#define INSERT_PADDING_BYTES_NOINIT(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__)
+#define INSERT_PADDING_WORDS_NOINIT(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__)
 
 #ifndef _MSC_VER

@@ -93,6 +93,31 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
         return static_cast<T>(key) == 0; \
     }
 
+/// Evaluates a boolean expression, and returns a result unless that expression is true.
+#define R_UNLESS(expr, res) \
+    { \
+        if (!(expr)) { \
+            if (res.IsError()) { \
+                LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
+            } \
+            return res; \
+        } \
+    }
+
+#define R_SUCCEEDED(res) (res.IsSuccess())
+
+/// Evaluates an expression that returns a result, and returns the result if it would fail.
+#define R_TRY(res_expr) \
+    { \
+        const auto _tmp_r_try_rc = (res_expr); \
+        if (_tmp_r_try_rc.IsError()) { \
+            return _tmp_r_try_rc; \
+        } \
+    }
+
+/// Evaluates a boolean expression, and succeeds if that expression is true.
+#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), RESULT_SUCCESS)
+
 namespace Common {
 
 [[nodiscard]] constexpr u32 MakeMagic(char a, char b, char c, char d) {
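A usage sketch for the result macros added above. The `Result` type here is a hypothetical minimal stand-in providing only what the macros require (`IsError`, `IsSuccess`, `raw`), `LOG_ERROR` is stubbed, and `Allocate`/`Initialize` are invented for illustration — none of this is taken from the PR beyond the macros themselves.

```cpp
#include <cstddef>
#include <cstdio>

// Stub: swallows the channel and format, prints the raw code.
#define LOG_ERROR(channel, fmt, value) std::printf("Failed with result: %u\n", (value))

struct Result {
    unsigned raw;
    bool IsError() const { return raw != 0; }
    bool IsSuccess() const { return raw == 0; }
};

// Macros as in the diff.
#define R_UNLESS(expr, res) \
    { \
        if (!(expr)) { \
            if (res.IsError()) { \
                LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
            } \
            return res; \
        } \
    }

#define R_TRY(res_expr) \
    { \
        const auto _tmp_r_try_rc = (res_expr); \
        if (_tmp_r_try_rc.IsError()) { \
            return _tmp_r_try_rc; \
        } \
    }

constexpr Result RESULT_SUCCESS{0};
constexpr Result ERR_INVALID_SIZE{101};

Result Allocate(std::size_t size) {
    R_UNLESS(size > 0, ERR_INVALID_SIZE); // early-return on a bad argument
    return RESULT_SUCCESS;
}

Result Initialize() {
    R_TRY(Allocate(64)); // succeeds, execution continues
    R_TRY(Allocate(0));  // fails: ERR_INVALID_SIZE is propagated to the caller
    return RESULT_SUCCESS;
}

int main() {
    return Initialize().IsSuccess() ? 0 : 1; // exits 1, as Initialize fails
}
```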
@@ -11,16 +11,16 @@ namespace Common {
 
 /// Ceiled integer division.
 template <typename N, typename D>
-requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr auto DivCeil(
-    N number, D divisor) {
-    return (static_cast<D>(number) + divisor - 1) / divisor;
+requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr N DivCeil(N number,
+                                                                                        D divisor) {
+    return static_cast<N>((static_cast<D>(number) + divisor - 1) / divisor);
 }
 
 /// Ceiled integer division with logarithmic divisor in base 2
 template <typename N, typename D>
-requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr auto DivCeilLog2(
+requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr N DivCeilLog2(
     N value, D alignment_log2) {
-    return (static_cast<D>(value) + (D(1) << alignment_log2) - 1) >> alignment_log2;
+    return static_cast<N>((static_cast<D>(value) + (D(1) << alignment_log2) - 1) >> alignment_log2);
 }
 
 } // namespace Common
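A worked example (sketch) of the tightened return type above: `DivCeil` now casts back to the numerator type `N` instead of letting the result deduce to the divisor type. Re-implemented locally, without the `requires` clause, so the checks stand alone.

```cpp
#include <type_traits>

// Ceiling division returning the numerator's type N, as in the new signature.
template <typename N, typename D>
constexpr N DivCeil(N number, D divisor) {
    return static_cast<N>((static_cast<D>(number) + divisor - 1) / divisor);
}

static_assert(DivCeil(10, 4u) == 3); // 10 / 4 rounded up
static_assert(DivCeil(8, 4u) == 2);  // exact division is unchanged
static_assert(std::is_same_v<decltype(DivCeil(10, 4u)), int>); // result type is N

int main() {
    return 0;
}
```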
src/common/intrusive_red_black_tree.h (new file, 602 lines)
@@ -0,0 +1,602 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/parent_of_member.h"
|
||||
#include "common/tree.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
namespace impl {
|
||||
|
||||
class IntrusiveRedBlackTreeImpl;
|
||||
|
||||
}
|
||||
|
||||
struct IntrusiveRedBlackTreeNode {
|
||||
public:
|
||||
using EntryType = RBEntry<IntrusiveRedBlackTreeNode>;
|
||||
|
||||
constexpr IntrusiveRedBlackTreeNode() = default;
|
||||
|
||||
void SetEntry(const EntryType& new_entry) {
|
||||
entry = new_entry;
|
||||
}
|
||||
|
||||
[[nodiscard]] EntryType& GetEntry() {
|
||||
return entry;
|
||||
}
|
||||
|
||||
[[nodiscard]] const EntryType& GetEntry() const {
|
||||
return entry;
|
||||
}
|
||||
|
||||
private:
|
||||
EntryType entry{};
|
||||
|
||||
friend class impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
template <class, class, class>
|
||||
friend class IntrusiveRedBlackTree;
|
||||
};
|
||||
|
||||
template <class T, class Traits, class Comparator>
|
||||
class IntrusiveRedBlackTree;
|
||||
|
||||
namespace impl {
|
||||
|
||||
class IntrusiveRedBlackTreeImpl {
|
||||
private:
|
||||
template <class, class, class>
|
||||
friend class ::Common::IntrusiveRedBlackTree;
|
||||
|
||||
using RootType = RBHead<IntrusiveRedBlackTreeNode>;
|
||||
RootType root;
|
||||
|
||||
public:
|
||||
template <bool Const>
|
||||
class Iterator;
|
||||
|
||||
using value_type = IntrusiveRedBlackTreeNode;
|
||||
using size_type = size_t;
|
||||
using difference_type = ptrdiff_t;
|
||||
using pointer = value_type*;
|
||||
using const_pointer = const value_type*;
|
||||
using reference = value_type&;
|
||||
using const_reference = const value_type&;
|
||||
using iterator = Iterator<false>;
|
||||
using const_iterator = Iterator<true>;
|
||||
|
||||
template <bool Const>
|
||||
class Iterator {
|
||||
public:
|
||||
using iterator_category = std::bidirectional_iterator_tag;
|
||||
using value_type = typename IntrusiveRedBlackTreeImpl::value_type;
|
||||
using difference_type = typename IntrusiveRedBlackTreeImpl::difference_type;
|
||||
using pointer = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_pointer,
|
||||
IntrusiveRedBlackTreeImpl::pointer>;
|
||||
using reference = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_reference,
|
||||
IntrusiveRedBlackTreeImpl::reference>;
|
||||
|
||||
private:
|
||||
pointer node;
|
||||
|
||||
public:
|
||||
explicit Iterator(pointer n) : node(n) {}
|
||||
|
||||
bool operator==(const Iterator& rhs) const {
|
||||
return this->node == rhs.node;
|
||||
}
|
||||
|
||||
        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        pointer operator->() const {
            return this->node;
        }

        reference operator*() const {
            return *this->node;
        }

        Iterator& operator++() {
            this->node = GetNext(this->node);
            return *this;
        }

        Iterator& operator--() {
            this->node = GetPrev(this->node);
            return *this;
        }

        Iterator operator++(int) {
            const Iterator it{*this};
            ++(*this);
            return it;
        }

        Iterator operator--(int) {
            const Iterator it{*this};
            --(*this);
            return it;
        }

        operator Iterator<true>() const {
            return Iterator<true>(this->node);
        }
    };

private:
    // Define accessors using RB_* functions.
    bool EmptyImpl() const {
        return root.IsEmpty();
    }

    IntrusiveRedBlackTreeNode* GetMinImpl() const {
        return RB_MIN(const_cast<RootType*>(&root));
    }

    IntrusiveRedBlackTreeNode* GetMaxImpl() const {
        return RB_MAX(const_cast<RootType*>(&root));
    }

    IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
        return RB_REMOVE(&root, node);
    }

public:
    static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
        return RB_NEXT(node);
    }

    static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
        return RB_PREV(node);
    }

    static const IntrusiveRedBlackTreeNode* GetNext(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
    }

    static const IntrusiveRedBlackTreeNode* GetPrev(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
    }

public:
    constexpr IntrusiveRedBlackTreeImpl() {}

    // Iterator accessors.
    iterator begin() {
        return iterator(this->GetMinImpl());
    }

    const_iterator begin() const {
        return const_iterator(this->GetMinImpl());
    }

    iterator end() {
        return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
    }

    const_iterator end() const {
        return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
    }

    const_iterator cbegin() const {
        return this->begin();
    }

    const_iterator cend() const {
        return this->end();
    }

    iterator iterator_to(reference ref) {
        return iterator(&ref);
    }

    const_iterator iterator_to(const_reference ref) const {
        return const_iterator(&ref);
    }

    // Content management.
    bool empty() const {
        return this->EmptyImpl();
    }

    reference back() {
        return *this->GetMaxImpl();
    }

    const_reference back() const {
        return *this->GetMaxImpl();
    }

    reference front() {
        return *this->GetMinImpl();
    }

    const_reference front() const {
        return *this->GetMinImpl();
    }

    iterator erase(iterator it) {
        auto cur = std::addressof(*it);
        auto next = GetNext(cur);
        this->RemoveImpl(cur);
        return iterator(next);
    }
};

} // namespace impl

template <typename T>
concept HasLightCompareType = requires {
    { std::is_same<typename T::LightCompareType, void>::value } -> std::convertible_to<bool>;
};

namespace impl {

template <typename T, typename Default>
consteval auto* GetLightCompareType() {
    if constexpr (HasLightCompareType<T>) {
        return static_cast<typename T::LightCompareType*>(nullptr);
    } else {
        return static_cast<Default*>(nullptr);
    }
}

} // namespace impl

template <typename T, typename Default>
using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;

template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree {
public:
    using ImplType = impl::IntrusiveRedBlackTreeImpl;

private:
    ImplType impl{};

public:
    template <bool Const>
    class Iterator;

    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = T*;
    using const_pointer = const T*;
    using reference = T&;
    using const_reference = const T&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;

    using light_value_type = LightCompareType<Comparator, value_type>;
    using const_light_pointer = const light_value_type*;
    using const_light_reference = const light_value_type&;

    template <bool Const>
    class Iterator {
    public:
        friend class IntrusiveRedBlackTree<T, Traits, Comparator>;

        using ImplIterator =
            std::conditional_t<Const, ImplType::const_iterator, ImplType::iterator>;

        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename IntrusiveRedBlackTree::value_type;
        using difference_type = typename IntrusiveRedBlackTree::difference_type;
        using pointer = std::conditional_t<Const, IntrusiveRedBlackTree::const_pointer,
                                           IntrusiveRedBlackTree::pointer>;
        using reference = std::conditional_t<Const, IntrusiveRedBlackTree::const_reference,
                                             IntrusiveRedBlackTree::reference>;

    private:
        ImplIterator iterator;

    private:
        explicit Iterator(ImplIterator it) : iterator(it) {}

        explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
                                                    ImplType::iterator>::type::pointer ptr)
            : iterator(ptr) {}

        ImplIterator GetImplIterator() const {
            return this->iterator;
        }

    public:
        bool operator==(const Iterator& rhs) const {
            return this->iterator == rhs.iterator;
        }

        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        pointer operator->() const {
            return Traits::GetParent(std::addressof(*this->iterator));
        }

        reference operator*() const {
            return *Traits::GetParent(std::addressof(*this->iterator));
        }

        Iterator& operator++() {
            ++this->iterator;
            return *this;
        }

        Iterator& operator--() {
            --this->iterator;
            return *this;
        }

        Iterator operator++(int) {
            const Iterator it{*this};
            ++this->iterator;
            return it;
        }

        Iterator operator--(int) {
            const Iterator it{*this};
            --this->iterator;
            return it;
        }

        operator Iterator<true>() const {
            return Iterator<true>(this->iterator);
        }
    };

private:
    static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
                           const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
    }

    static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
    }

    // Define accessors using RB_* functions.
    IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
        return RB_INSERT(&impl.root, node, CompareImpl);
    }

    IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
        return RB_FIND(const_cast<ImplType::RootType*>(&impl.root),
                       const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    }

    IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
        return RB_NFIND(const_cast<ImplType::RootType*>(&impl.root),
                        const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    }

    IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
        return RB_FIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
                             static_cast<const void*>(lelm), LightCompareImpl);
    }

    IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
        return RB_NFIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
                              static_cast<const void*>(lelm), LightCompareImpl);
    }

public:
    constexpr IntrusiveRedBlackTree() = default;

    // Iterator accessors.
    iterator begin() {
        return iterator(this->impl.begin());
    }

    const_iterator begin() const {
        return const_iterator(this->impl.begin());
    }

    iterator end() {
        return iterator(this->impl.end());
    }

    const_iterator end() const {
        return const_iterator(this->impl.end());
    }

    const_iterator cbegin() const {
        return this->begin();
    }

    const_iterator cend() const {
        return this->end();
    }

    iterator iterator_to(reference ref) {
        return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    }

    const_iterator iterator_to(const_reference ref) const {
        return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    }

    // Content management.
    bool empty() const {
        return this->impl.empty();
    }

    reference back() {
        return *Traits::GetParent(std::addressof(this->impl.back()));
    }

    const_reference back() const {
        return *Traits::GetParent(std::addressof(this->impl.back()));
    }

    reference front() {
        return *Traits::GetParent(std::addressof(this->impl.front()));
    }

    const_reference front() const {
        return *Traits::GetParent(std::addressof(this->impl.front()));
    }

    iterator erase(iterator it) {
        return iterator(this->impl.erase(it.GetImplIterator()));
    }

    iterator insert(reference ref) {
        ImplType::pointer node = Traits::GetNode(std::addressof(ref));
        this->InsertImpl(node);
        return iterator(node);
    }

    iterator find(const_reference ref) const {
        return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
    }

    iterator nfind(const_reference ref) const {
        return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
    }

    iterator find_light(const_light_reference ref) const {
        return iterator(this->FindLightImpl(std::addressof(ref)));
    }

    iterator nfind_light(const_light_reference ref) const {
        return iterator(this->NFindLightImpl(std::addressof(ref)));
    }
};

template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraits;

template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
class IntrusiveRedBlackTreeMemberTraits<Member, Derived> {
public:
    template <class Comparator>
    using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraits, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return std::addressof(parent->*Member);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return std::addressof(parent->*Member);
    }

    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

private:
    static constexpr TypedStorage<Derived> DerivedStorage = {};
    static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage));
};

template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;

template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert<Member, Derived> {
public:
    template <class Comparator>
    using TreeType =
        IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

    static constexpr bool IsValid() {
        TypedStorage<Derived> DerivedStorage = {};
        return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
    }

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return std::addressof(parent->*Member);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return std::addressof(parent->*Member);
    }

    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }
};

template <class Derived>
class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
public:
    constexpr Derived* GetPrev() {
        return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
    }
    constexpr const Derived* GetPrev() const {
        return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
    }

    constexpr Derived* GetNext() {
        return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
    }
    constexpr const Derived* GetNext() const {
        return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
    }
};

template <class Derived>
class IntrusiveRedBlackTreeBaseTraits {
public:
    template <class Comparator>
    using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeBaseTraits, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return static_cast<IntrusiveRedBlackTreeNode*>(parent);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
    }

    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return static_cast<Derived*>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const Derived*>(node);
    }
};

} // namespace Common
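
Usage, for orientation, is an illustrative sketch rather than part of the diff: an element type derives from IntrusiveRedBlackTreeBaseNode (or embeds a node member and uses the member traits), and the comparator supplies static Compare overloads returning negative/zero/positive; declaring a LightCompareType additionally enables key-only lookups through find_light/nfind_light. Object and ObjectCompare below are hypothetical names.

// Illustrative sketch only (hypothetical Object/ObjectCompare; assumes this header).
struct Object : Common::IntrusiveRedBlackTreeBaseNode<Object> {
    int key{};
};

struct ObjectCompare {
    // Declaring this type enables key-only lookups via find_light()/nfind_light().
    using LightCompareType = int;

    static int Compare(const Object& lhs, const Object& rhs) {
        return (lhs.key > rhs.key) - (lhs.key < rhs.key);
    }
    static int Compare(const int& key, const Object& rhs) {
        return (key > rhs.key) - (key < rhs.key);
    }
};

using ObjectTree = Common::IntrusiveRedBlackTreeBaseTraits<Object>::TreeType<ObjectCompare>;

void Example() {
    Object a{}, b{};
    b.key = 1;

    ObjectTree tree; // the tree links the nodes but does not own the elements
    tree.insert(a);
    tree.insert(b);
    auto it = tree.find_light(1); // key-only lookup, no full Object required
}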

@@ -145,10 +145,18 @@ void ColorConsoleBackend::Write(const Entry& entry) {
    PrintColoredMessage(entry);
}

// _SH_DENYWR allows read only access to the file for other programs.
// It is #defined to 0 on other platforms
FileBackend::FileBackend(const std::string& filename)
    : file(filename, "w", _SH_DENYWR), bytes_written(0) {}
FileBackend::FileBackend(const std::string& filename) : bytes_written(0) {
    if (Common::FS::Exists(filename + ".old.txt")) {
        Common::FS::Delete(filename + ".old.txt");
    }
    if (Common::FS::Exists(filename)) {
        Common::FS::Rename(filename, filename + ".old.txt");
    }

    // _SH_DENYWR allows read only access to the file for other programs.
    // It is #defined to 0 on other platforms
    file = Common::FS::IOFile(filename, "w", _SH_DENYWR);
}

void FileBackend::Write(const Entry& entry) {
    // prevent logs from going over the maximum size (in case its spamming and the user doesn't
@@ -1,11 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/memory_hook.h"

namespace Common {

MemoryHook::~MemoryHook() = default;

} // namespace Common
@@ -1,47 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <optional>

#include "common/common_types.h"

namespace Common {

/**
 * Memory hooks have two purposes:
 * 1. To allow reads and writes to a region of memory to be intercepted. This is used to implement
 *    texture forwarding and memory breakpoints for debugging.
 * 2. To allow for the implementation of MMIO devices.
 *
 * A hook may be mapped to multiple regions of memory.
 *
 * If a std::nullopt or false is returned from a function, the read/write request is passed through
 * to the underlying memory region.
 */
class MemoryHook {
public:
    virtual ~MemoryHook();

    virtual std::optional<bool> IsValidAddress(VAddr addr) = 0;

    virtual std::optional<u8> Read8(VAddr addr) = 0;
    virtual std::optional<u16> Read16(VAddr addr) = 0;
    virtual std::optional<u32> Read32(VAddr addr) = 0;
    virtual std::optional<u64> Read64(VAddr addr) = 0;

    virtual bool ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size) = 0;

    virtual bool Write8(VAddr addr, u8 data) = 0;
    virtual bool Write16(VAddr addr, u16 data) = 0;
    virtual bool Write32(VAddr addr, u32 data) = 0;
    virtual bool Write64(VAddr addr, u64 data) = 0;

    virtual bool WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size) = 0;
};

using MemoryHookPointer = std::shared_ptr<MemoryHook>;
} // namespace Common
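
For orientation, a minimal pass-through hook under the interface removed above would have looked roughly like the sketch below; DummyHook is a hypothetical name, and returning std::nullopt/false defers each access to the underlying memory region.

// Hypothetical illustration of the removed interface, not from any commit.
class DummyHook final : public Common::MemoryHook {
public:
    std::optional<bool> IsValidAddress(VAddr) override {
        return true;
    }
    std::optional<u8> Read8(VAddr) override {
        return std::nullopt; // pass the read through
    }
    std::optional<u16> Read16(VAddr) override {
        return std::nullopt;
    }
    std::optional<u32> Read32(VAddr) override {
        return std::nullopt;
    }
    std::optional<u64> Read64(VAddr) override {
        return std::nullopt;
    }
    bool ReadBlock(VAddr, void*, std::size_t) override {
        return false;
    }
    bool Write8(VAddr, u8) override {
        return false; // pass the write through
    }
    bool Write16(VAddr, u16) override {
        return false;
    }
    bool Write32(VAddr, u32) override {
        return false;
    }
    bool Write64(VAddr, u64) override {
        return false;
    }
    bool WriteBlock(VAddr, const void*, std::size_t) override {
        return false;
    }
};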
27
src/common/nvidia_flags.cpp
Normal file
@@ -0,0 +1,27 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <filesystem>
#include <stdlib.h>

#include <fmt/format.h>

#include "common/file_util.h"
#include "common/nvidia_flags.h"

namespace Common {

void ConfigureNvidiaEnvironmentFlags() {
#ifdef _WIN32
    const std::string shader_path = Common::FS::SanitizePath(
        fmt::format("{}/nvidia/", Common::FS::GetUserPath(Common::FS::UserPath::ShaderDir)));
    const std::string windows_path =
        Common::FS::SanitizePath(shader_path, Common::FS::DirectorySeparator::BackwardSlash);
    void(Common::FS::CreateFullPath(shader_path + '/'));
    void(_putenv(fmt::format("__GL_SHADER_DISK_CACHE_PATH={}", windows_path).c_str()));
    void(_putenv("__GL_SHADER_DISK_CACHE_SKIP_CLEANUP=1"));
#endif
}

} // namespace Common
10
src/common/nvidia_flags.h
Normal file
@@ -0,0 +1,10 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

namespace Common {

/// Configure platform specific flags for Nvidia's driver
void ConfigureNvidiaEnvironmentFlags();

} // namespace Common
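
The intended call pattern, as a sketch: the function is meant to run once during start-up, before the GL driver opens its shader cache, so the environment variables can take effect.

// Sketch of a call site; any early start-up path before GL initialization works.
int main(int argc, char** argv) {
    Common::ConfigureNvidiaEnvironmentFlags();
    // ... create the window / OpenGL context afterwards ...
}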
@@ -10,16 +10,10 @@ PageTable::PageTable() = default;

PageTable::~PageTable() noexcept = default;

void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
                       bool has_attribute) {
    const std::size_t num_page_table_entries{1ULL
                                             << (address_space_width_in_bits - page_size_in_bits)};
void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
    const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
    pointers.resize(num_page_table_entries);
    backing_addr.resize(num_page_table_entries);

    if (has_attribute) {
        attributes.resize(num_page_table_entries);
    }
}

} // namespace Common
@@ -4,10 +4,10 @@

#pragma once

#include <atomic>
#include <tuple>

#include "common/common_types.h"
#include "common/memory_hook.h"
#include "common/virtual_buffer.h"

namespace Common {

@@ -20,27 +20,6 @@ enum class PageType : u8 {
    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
    /// invalidation
    RasterizerCachedMemory,
    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
    Special,
    /// Page is allocated for use.
    Allocated,
};

struct SpecialRegion {
    enum class Type {
        DebugHook,
        IODevice,
    } type;

    MemoryHookPointer handler;

    [[nodiscard]] bool operator<(const SpecialRegion& other) const {
        return std::tie(type, handler) < std::tie(other.type, other.handler);
    }

    [[nodiscard]] bool operator==(const SpecialRegion& other) const {
        return std::tie(type, handler) == std::tie(other.type, other.handler);
    }
};

@@ -48,6 +27,59 @@ struct SpecialRegion {
 * mimics the way a real CPU page table works.
 */
struct PageTable {
    /// Number of bits reserved for attribute tagging.
    /// This can be at most the guaranteed alignment of the pointers in the page table.
    static constexpr int ATTRIBUTE_BITS = 2;

    /**
     * Pair of host pointer and page type attribute.
     * This uses the lower bits of a given pointer to store the attribute tag.
     * Writing and reading the pointer attribute pair is guaranteed to be atomic for the same method
     * call. In other words, they are guaranteed to be synchronized at all times.
     */
    class PageInfo {
    public:
        /// Returns the page pointer
        [[nodiscard]] u8* Pointer() const noexcept {
            return ExtractPointer(raw.load(std::memory_order_relaxed));
        }

        /// Returns the page type attribute
        [[nodiscard]] PageType Type() const noexcept {
            return ExtractType(raw.load(std::memory_order_relaxed));
        }

        /// Returns the page pointer and attribute pair, extracted from the same atomic read
        [[nodiscard]] std::pair<u8*, PageType> PointerType() const noexcept {
            const uintptr_t non_atomic_raw = raw.load(std::memory_order_relaxed);
            return {ExtractPointer(non_atomic_raw), ExtractType(non_atomic_raw)};
        }

        /// Returns the raw representation of the page information.
        /// Use ExtractPointer and ExtractType to unpack the value.
        [[nodiscard]] uintptr_t Raw() const noexcept {
            return raw.load(std::memory_order_relaxed);
        }

        /// Write a page pointer and type pair atomically
        void Store(u8* pointer, PageType type) noexcept {
            raw.store(reinterpret_cast<uintptr_t>(pointer) | static_cast<uintptr_t>(type));
        }

        /// Unpack a pointer from a page info raw representation
        [[nodiscard]] static u8* ExtractPointer(uintptr_t raw) noexcept {
            return reinterpret_cast<u8*>(raw & (~uintptr_t{0} << ATTRIBUTE_BITS));
        }

        /// Unpack a page type from a page info raw representation
        [[nodiscard]] static PageType ExtractType(uintptr_t raw) noexcept {
            return static_cast<PageType>(raw & ((uintptr_t{1} << ATTRIBUTE_BITS) - 1));
        }

    private:
        std::atomic<uintptr_t> raw;
    };

    PageTable();
    ~PageTable() noexcept;

@@ -58,25 +90,21 @@ struct PageTable {
    PageTable& operator=(PageTable&&) noexcept = default;

    /**
     * Resizes the page table to be able to accomodate enough pages within
     * Resizes the page table to be able to accommodate enough pages within
     * a given address space.
     *
     * @param address_space_width_in_bits The address size width in bits.
     * @param page_size_in_bits The page size in bits.
     * @param has_attribute Whether or not this page has any backing attributes.
     */
    void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
                bool has_attribute);
    void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);

    /**
     * Vector of memory pointers backing each page. An entry can only be non-null if the
     * corresponding entry in the `attributes` vector is of type `Memory`.
     * corresponding attribute element is of type `Memory`.
     */
    VirtualBuffer<u8*> pointers;
    VirtualBuffer<PageInfo> pointers;

    VirtualBuffer<u64> backing_addr;

    VirtualBuffer<PageType> attributes;
};

} // namespace Common
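
To make the tagging concrete, a small sketch (illustration only; it assumes host pointers aligned to at least 4 bytes so the low ATTRIBUTE_BITS are free, and the PageType::Memory enumerator from the full enum):

// Round-trip of the low-bit tag scheme used by PageInfo above (illustration).
void Example() {
    alignas(4) static u8 backing[0x1000];
    const auto raw = reinterpret_cast<uintptr_t>(backing) |
                     static_cast<uintptr_t>(Common::PageType::Memory);
    u8* const ptr = Common::PageTable::PageInfo::ExtractPointer(raw); // == backing
    const auto type = Common::PageTable::PageInfo::ExtractType(raw);  // == PageType::Memory
}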
191
src/common/parent_of_member.h
Normal file
@@ -0,0 +1,191 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <type_traits>

#include "common/assert.h"
#include "common/common_types.h"

namespace Common {
namespace detail {
template <typename T, size_t Size, size_t Align>
struct TypedStorageImpl {
    std::aligned_storage_t<Size, Align> storage_;
};
} // namespace detail

template <typename T>
using TypedStorage = detail::TypedStorageImpl<T, sizeof(T), alignof(T)>;

template <typename T>
static constexpr T* GetPointer(TypedStorage<T>& ts) {
    return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
}

template <typename T>
static constexpr const T* GetPointer(const TypedStorage<T>& ts) {
    return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
}

namespace impl {

template <size_t MaxDepth>
struct OffsetOfUnionHolder {
    template <typename ParentType, typename MemberType, size_t Offset>
    union UnionImpl {
        using PaddingMember = char;
        static constexpr size_t GetOffset() {
            return Offset;
        }

#pragma pack(push, 1)
        struct {
            PaddingMember padding[Offset];
            MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
        } data;
#pragma pack(pop)
        UnionImpl<ParentType, MemberType, Offset + 1> next_union;
    };

    template <typename ParentType, typename MemberType>
    union UnionImpl<ParentType, MemberType, 0> {
        static constexpr size_t GetOffset() {
            return 0;
        }

        struct {
            MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
        } data;
        UnionImpl<ParentType, MemberType, 1> next_union;
    };

    template <typename ParentType, typename MemberType>
    union UnionImpl<ParentType, MemberType, MaxDepth> {};
};

template <typename ParentType, typename MemberType>
struct OffsetOfCalculator {
    using UnionHolder =
        typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
                                                                             0>;
    union Union {
        char c{};
        UnionHolder first_union;
        TypedStorage<ParentType> parent;

        constexpr Union() : c() {}
    };
    static constexpr Union U = {};

    static constexpr const MemberType* GetNextAddress(const MemberType* start,
                                                      const MemberType* target) {
        while (start < target) {
            start++;
        }
        return start;
    }

    static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
                                                  const MemberType* target) {
        return (target - start) * sizeof(MemberType);
    }

    template <typename CurUnion>
    static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
                                                 CurUnion& cur_union) {
        constexpr size_t Offset = CurUnion::GetOffset();
        const auto target = std::addressof(GetPointer(U.parent)->*member);
        const auto start = std::addressof(cur_union.data.members[0]);
        const auto next = GetNextAddress(start, target);

        if (next != target) {
            if constexpr (Offset < sizeof(MemberType) - 1) {
                return OffsetOfImpl(member, cur_union.next_union);
            } else {
                UNREACHABLE();
            }
        }

        return (next - start) * sizeof(MemberType) + Offset;
    }

    static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
        return OffsetOfImpl(member, U.first_union);
    }
};

template <typename T>
struct GetMemberPointerTraits;

template <typename P, typename M>
struct GetMemberPointerTraits<M P::*> {
    using Parent = P;
    using Member = M;
};

template <auto MemberPtr>
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;

template <auto MemberPtr>
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;

template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
static inline std::ptrdiff_t OffsetOf = [] {
    using DeducedParentType = GetParentType<MemberPtr>;
    using MemberType = GetMemberType<MemberPtr>;
    static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
                  std::is_same<RealParentType, DeducedParentType>::value);

    return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
}();

} // namespace impl

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
    return *static_cast<RealParentType*>(
        static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
    return *static_cast<const RealParentType*>(static_cast<const void*>(
        static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
    return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
    return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

} // namespace Common
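
An illustrative use of the offset machinery above (Outer and Example are hypothetical names): given a pointer to a member, GetParentPointer recovers a pointer to the enclosing object.

// Sketch: recover the enclosing object from a pointer to one of its members.
struct Outer {
    int first;
    int second;
};

void Example(Outer& outer) {
    int* member = &outer.second;
    // The template argument names which member 'member' points into.
    Outer* parent = Common::GetParentPointer<&Outer::second>(member);
    // parent == &outer
}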
@@ -49,3 +49,9 @@ ScopeExitHelper<Func> ScopeExit(Func&& func) {
 * \endcode
 */
#define SCOPE_EXIT(body) auto CONCAT2(scope_exit_helper_, __LINE__) = detail::ScopeExit([&]() body)

/**
 * This macro is similar to SCOPE_EXIT, except the object is caller managed. This is intended to be
 * used when the caller might want to cancel the ScopeExit.
 */
#define SCOPE_GUARD(body) detail::ScopeExit([&]() body)
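
A usage sketch (illustrative only; it assumes <cstdio> and a Cancel() member on the helper, which the comment above implies but the hunk does not show): SCOPE_EXIT always runs its body at scope end, while SCOPE_GUARD yields an object the caller keeps and may cancel.

// Sketch of the two macros; file names and cleanup bodies are illustrative.
void Example() {
    std::FILE* f = std::fopen("log.txt", "w");
    if (!f) {
        return;
    }
    SCOPE_EXIT({ std::fclose(f); }); // runs unconditionally at scope end

    auto guard = SCOPE_GUARD({ std::remove("log.txt"); });
    // ... work that may fail; on success, keep the file:
    guard.Cancel(); // caller-managed: the cleanup no longer runs (assumed API)
}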
@@ -394,7 +394,7 @@ public:
    template <typename S, typename T2, typename F2>
    friend S operator%(const S& p, const swapped_t v);

    // Arithmetics + assignements
    // Arithmetics + assignments
    template <typename S, typename T2, typename F2>
    friend S operator+=(const S& p, const swapped_t v);

@@ -451,7 +451,7 @@ S operator%(const S& i, const swap_struct_t<T, F> v) {
    return i % v.swap();
}

// Arithmetics + assignements
// Arithmetics + assignments
template <typename S, typename T, typename F>
S& operator+=(S& i, const swap_struct_t<T, F> v) {
    i += v.swap();
@@ -1,159 +0,0 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <ctime>
#include <fmt/format.h>
#include "common/common_types.h"
#include "common/string_util.h"
#include "common/timer.h"

namespace Common {

std::chrono::milliseconds Timer::GetTimeMs() {
    return std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch());
}

// --------------------------------------------
// Initiate, Start, Stop, and Update the time
// --------------------------------------------

// Set initial values for the class
Timer::Timer() : m_LastTime(0), m_StartTime(0), m_Running(false) {
    Update();
}

// Write the starting time
void Timer::Start() {
    m_StartTime = GetTimeMs();
    m_Running = true;
}

// Stop the timer
void Timer::Stop() {
    // Write the final time
    m_LastTime = GetTimeMs();
    m_Running = false;
}

// Update the last time variable
void Timer::Update() {
    m_LastTime = GetTimeMs();
    // TODO(ector) - QPF
}

// -------------------------------------
// Get time difference and elapsed time
// -------------------------------------

// Get the number of milliseconds since the last Update()
std::chrono::milliseconds Timer::GetTimeDifference() {
    return GetTimeMs() - m_LastTime;
}

// Add the time difference since the last Update() to the starting time.
// This is used to compensate for a paused game.
void Timer::AddTimeDifference() {
    m_StartTime += GetTimeDifference();
}

// Get the time elapsed since the Start()
std::chrono::milliseconds Timer::GetTimeElapsed() {
    // If we have not started yet, return 1 (because then I don't
    // have to change the FPS calculation in CoreRerecording.cpp .
    if (m_StartTime.count() == 0)
        return std::chrono::milliseconds(1);

    // Return the final timer time if the timer is stopped
    if (!m_Running)
        return (m_LastTime - m_StartTime);

    return (GetTimeMs() - m_StartTime);
}

// Get the formatted time elapsed since the Start()
std::string Timer::GetTimeElapsedFormatted() const {
    // If we have not started yet, return zero
    if (m_StartTime.count() == 0)
        return "00:00:00:000";

    // The number of milliseconds since the start.
    // Use a different value if the timer is stopped.
    std::chrono::milliseconds Milliseconds;
    if (m_Running)
        Milliseconds = GetTimeMs() - m_StartTime;
    else
        Milliseconds = m_LastTime - m_StartTime;
    // Seconds
    std::chrono::seconds Seconds = std::chrono::duration_cast<std::chrono::seconds>(Milliseconds);
    // Minutes
    std::chrono::minutes Minutes = std::chrono::duration_cast<std::chrono::minutes>(Milliseconds);
    // Hours
    std::chrono::hours Hours = std::chrono::duration_cast<std::chrono::hours>(Milliseconds);

    std::string TmpStr = fmt::format("{:02}:{:02}:{:02}:{:03}", Hours.count(), Minutes.count() % 60,
                                     Seconds.count() % 60, Milliseconds.count() % 1000);
    return TmpStr;
}

// Get the number of seconds since January 1 1970
std::chrono::seconds Timer::GetTimeSinceJan1970() {
    return std::chrono::duration_cast<std::chrono::seconds>(GetTimeMs());
}

std::chrono::seconds Timer::GetLocalTimeSinceJan1970() {
    time_t sysTime, tzDiff, tzDST;
    struct tm* gmTime;

    time(&sysTime);

    // Account for DST where needed
    gmTime = localtime(&sysTime);
    if (gmTime->tm_isdst == 1)
        tzDST = 3600;
    else
        tzDST = 0;

    // Lazy way to get local time in sec
    gmTime = gmtime(&sysTime);
    tzDiff = sysTime - mktime(gmTime);

    return std::chrono::seconds(sysTime + tzDiff + tzDST);
}

// Return the current time formatted as Minutes:Seconds:Milliseconds
// in the form 00:00:000.
std::string Timer::GetTimeFormatted() {
    time_t sysTime;
    struct tm* gmTime;
    char tmp[13];

    time(&sysTime);
    gmTime = localtime(&sysTime);

    strftime(tmp, 6, "%M:%S", gmTime);

    u64 milliseconds = static_cast<u64>(GetTimeMs().count()) % 1000;
    return fmt::format("{}:{:03}", tmp, milliseconds);
}

// Returns a timestamp with decimals for precise time comparisons
// ----------------
double Timer::GetDoubleTime() {
    // Get continuous timestamp
    auto tmp_seconds = static_cast<u64>(GetTimeSinceJan1970().count());
    const auto ms = static_cast<double>(static_cast<u64>(GetTimeMs().count()) % 1000);

    // Remove a few years. We only really want enough seconds to make
    // sure that we are detecting actual actions, perhaps 60 seconds is
    // enough really, but I leave a year of seconds anyway, in case the
    // user's clock is incorrect or something like that.
    tmp_seconds = tmp_seconds - (38 * 365 * 24 * 60 * 60);

    // Make a smaller integer that fits in the double
    const auto seconds = static_cast<u32>(tmp_seconds);
    return seconds + ms;
}

} // Namespace Common
@@ -1,41 +0,0 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <chrono>
#include <string>
#include "common/common_types.h"

namespace Common {
class Timer {
public:
    Timer();

    void Start();
    void Stop();
    void Update();

    // The time difference is always returned in milliseconds, regardless of alternative internal
    // representation
    [[nodiscard]] std::chrono::milliseconds GetTimeDifference();
    void AddTimeDifference();

    [[nodiscard]] static std::chrono::seconds GetTimeSinceJan1970();
    [[nodiscard]] static std::chrono::seconds GetLocalTimeSinceJan1970();
    [[nodiscard]] static double GetDoubleTime();

    [[nodiscard]] static std::string GetTimeFormatted();
    [[nodiscard]] std::string GetTimeElapsedFormatted() const;
    [[nodiscard]] std::chrono::milliseconds GetTimeElapsed();

    [[nodiscard]] static std::chrono::milliseconds GetTimeMs();

private:
    std::chrono::milliseconds m_LastTime;
    std::chrono::milliseconds m_StartTime;
    bool m_Running;
};

} // Namespace Common
674
src/common/tree.h
Normal file
@@ -0,0 +1,674 @@
/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
/* $FreeBSD$ */

/*-
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

/*
 * This file defines data structures for red-black trees.
 *
 * A red-black tree is a binary search tree with the node color as an
 * extra attribute. It fulfills a set of conditions:
 * - every search path from the root to a leaf consists of the
 *   same number of black nodes,
 * - each red node (except for the root) has a black parent,
 * - each leaf node is black.
 *
 * Every operation on a red-black tree is bounded as O(lg n).
 * The maximum height of a red-black tree is 2lg (n+1).
 */

namespace Common {
template <typename T>
class RBHead {
public:
    [[nodiscard]] T* Root() {
        return rbh_root;
    }

    [[nodiscard]] const T* Root() const {
        return rbh_root;
    }

    void SetRoot(T* root) {
        rbh_root = root;
    }

    [[nodiscard]] bool IsEmpty() const {
        return Root() == nullptr;
    }

private:
    T* rbh_root = nullptr;
};

enum class EntryColor {
    Black,
    Red,
};

template <typename T>
class RBEntry {
public:
    [[nodiscard]] T* Left() {
        return rbe_left;
    }

    [[nodiscard]] const T* Left() const {
        return rbe_left;
    }

    void SetLeft(T* left) {
        rbe_left = left;
    }

    [[nodiscard]] T* Right() {
        return rbe_right;
    }

    [[nodiscard]] const T* Right() const {
        return rbe_right;
    }

    void SetRight(T* right) {
        rbe_right = right;
    }

    [[nodiscard]] T* Parent() {
        return rbe_parent;
    }

    [[nodiscard]] const T* Parent() const {
        return rbe_parent;
    }

    void SetParent(T* parent) {
        rbe_parent = parent;
    }

    [[nodiscard]] bool IsBlack() const {
        return rbe_color == EntryColor::Black;
    }

    [[nodiscard]] bool IsRed() const {
        return rbe_color == EntryColor::Red;
    }

    [[nodiscard]] EntryColor Color() const {
        return rbe_color;
    }

    void SetColor(EntryColor color) {
        rbe_color = color;
    }

private:
    T* rbe_left = nullptr;
    T* rbe_right = nullptr;
    T* rbe_parent = nullptr;
    EntryColor rbe_color{};
};
template <typename Node>
[[nodiscard]] RBEntry<Node>& RB_ENTRY(Node* node) {
    return node->GetEntry();
}

template <typename Node>
[[nodiscard]] const RBEntry<Node>& RB_ENTRY(const Node* node) {
    return node->GetEntry();
}

template <typename Node>
[[nodiscard]] Node* RB_PARENT(Node* node) {
    return RB_ENTRY(node).Parent();
}

template <typename Node>
[[nodiscard]] const Node* RB_PARENT(const Node* node) {
    return RB_ENTRY(node).Parent();
}

template <typename Node>
void RB_SET_PARENT(Node* node, Node* parent) {
    return RB_ENTRY(node).SetParent(parent);
}

template <typename Node>
[[nodiscard]] Node* RB_LEFT(Node* node) {
    return RB_ENTRY(node).Left();
}

template <typename Node>
[[nodiscard]] const Node* RB_LEFT(const Node* node) {
    return RB_ENTRY(node).Left();
}

template <typename Node>
void RB_SET_LEFT(Node* node, Node* left) {
    return RB_ENTRY(node).SetLeft(left);
}

template <typename Node>
[[nodiscard]] Node* RB_RIGHT(Node* node) {
    return RB_ENTRY(node).Right();
}

template <typename Node>
[[nodiscard]] const Node* RB_RIGHT(const Node* node) {
    return RB_ENTRY(node).Right();
}

template <typename Node>
void RB_SET_RIGHT(Node* node, Node* right) {
    return RB_ENTRY(node).SetRight(right);
}

template <typename Node>
[[nodiscard]] bool RB_IS_BLACK(const Node* node) {
    return RB_ENTRY(node).IsBlack();
}

template <typename Node>
[[nodiscard]] bool RB_IS_RED(const Node* node) {
    return RB_ENTRY(node).IsRed();
}

template <typename Node>
[[nodiscard]] EntryColor RB_COLOR(const Node* node) {
    return RB_ENTRY(node).Color();
}

template <typename Node>
void RB_SET_COLOR(Node* node, EntryColor color) {
    return RB_ENTRY(node).SetColor(color);
}

template <typename Node>
void RB_SET(Node* node, Node* parent) {
    auto& entry = RB_ENTRY(node);
    entry.SetParent(parent);
    entry.SetLeft(nullptr);
    entry.SetRight(nullptr);
    entry.SetColor(EntryColor::Red);
}

template <typename Node>
void RB_SET_BLACKRED(Node* black, Node* red) {
    RB_SET_COLOR(black, EntryColor::Black);
    RB_SET_COLOR(red, EntryColor::Red);
}
template <typename Node>
void RB_ROTATE_LEFT(RBHead<Node>* head, Node* elm, Node*& tmp) {
    tmp = RB_RIGHT(elm);
    RB_SET_RIGHT(elm, RB_LEFT(tmp));
    if (RB_RIGHT(elm) != nullptr) {
        RB_SET_PARENT(RB_LEFT(tmp), elm);
    }

    RB_SET_PARENT(tmp, RB_PARENT(elm));
    if (RB_PARENT(tmp) != nullptr) {
        if (elm == RB_LEFT(RB_PARENT(elm))) {
            RB_SET_LEFT(RB_PARENT(elm), tmp);
        } else {
            RB_SET_RIGHT(RB_PARENT(elm), tmp);
        }
    } else {
        head->SetRoot(tmp);
    }

    RB_SET_LEFT(tmp, elm);
    RB_SET_PARENT(elm, tmp);
}

template <typename Node>
void RB_ROTATE_RIGHT(RBHead<Node>* head, Node* elm, Node*& tmp) {
    tmp = RB_LEFT(elm);
    RB_SET_LEFT(elm, RB_RIGHT(tmp));
    if (RB_LEFT(elm) != nullptr) {
        RB_SET_PARENT(RB_RIGHT(tmp), elm);
    }

    RB_SET_PARENT(tmp, RB_PARENT(elm));
    if (RB_PARENT(tmp) != nullptr) {
        if (elm == RB_LEFT(RB_PARENT(elm))) {
            RB_SET_LEFT(RB_PARENT(elm), tmp);
        } else {
            RB_SET_RIGHT(RB_PARENT(elm), tmp);
        }
    } else {
        head->SetRoot(tmp);
    }

    RB_SET_RIGHT(tmp, elm);
    RB_SET_PARENT(elm, tmp);
}

template <typename Node>
void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
    Node* parent = nullptr;
    Node* tmp = nullptr;

    while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
        Node* gparent = RB_PARENT(parent);
        if (parent == RB_LEFT(gparent)) {
            tmp = RB_RIGHT(gparent);
            if (tmp && RB_IS_RED(tmp)) {
                RB_SET_COLOR(tmp, EntryColor::Black);
                RB_SET_BLACKRED(parent, gparent);
                elm = gparent;
                continue;
            }

            if (RB_RIGHT(parent) == elm) {
                RB_ROTATE_LEFT(head, parent, tmp);
                tmp = parent;
                parent = elm;
                elm = tmp;
            }

            RB_SET_BLACKRED(parent, gparent);
            RB_ROTATE_RIGHT(head, gparent, tmp);
        } else {
            tmp = RB_LEFT(gparent);
            if (tmp && RB_IS_RED(tmp)) {
                RB_SET_COLOR(tmp, EntryColor::Black);
                RB_SET_BLACKRED(parent, gparent);
                elm = gparent;
                continue;
            }

            if (RB_LEFT(parent) == elm) {
                RB_ROTATE_RIGHT(head, parent, tmp);
                tmp = parent;
                parent = elm;
                elm = tmp;
            }

            RB_SET_BLACKRED(parent, gparent);
            RB_ROTATE_LEFT(head, gparent, tmp);
        }
    }

    RB_SET_COLOR(head->Root(), EntryColor::Black);
}
template <typename Node>
void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
    Node* tmp;
    while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root()) {
        if (RB_LEFT(parent) == elm) {
            tmp = RB_RIGHT(parent);
            if (RB_IS_RED(tmp)) {
                RB_SET_BLACKRED(tmp, parent);
                RB_ROTATE_LEFT(head, parent, tmp);
                tmp = RB_RIGHT(parent);
            }

            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
                RB_SET_COLOR(tmp, EntryColor::Red);
                elm = parent;
                parent = RB_PARENT(elm);
            } else {
                if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
                    Node* oleft;
                    if ((oleft = RB_LEFT(tmp)) != nullptr) {
                        RB_SET_COLOR(oleft, EntryColor::Black);
                    }

                    RB_SET_COLOR(tmp, EntryColor::Red);
                    RB_ROTATE_RIGHT(head, tmp, oleft);
                    tmp = RB_RIGHT(parent);
                }

                RB_SET_COLOR(tmp, RB_COLOR(parent));
                RB_SET_COLOR(parent, EntryColor::Black);
                if (RB_RIGHT(tmp)) {
                    RB_SET_COLOR(RB_RIGHT(tmp), EntryColor::Black);
                }

                RB_ROTATE_LEFT(head, parent, tmp);
                elm = head->Root();
                break;
            }
        } else {
            tmp = RB_LEFT(parent);
            if (RB_IS_RED(tmp)) {
                RB_SET_BLACKRED(tmp, parent);
                RB_ROTATE_RIGHT(head, parent, tmp);
                tmp = RB_LEFT(parent);
            }

            if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
                (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
                RB_SET_COLOR(tmp, EntryColor::Red);
                elm = parent;
                parent = RB_PARENT(elm);
            } else {
                if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
                    Node* oright;
                    if ((oright = RB_RIGHT(tmp)) != nullptr) {
                        RB_SET_COLOR(oright, EntryColor::Black);
                    }

                    RB_SET_COLOR(tmp, EntryColor::Red);
                    RB_ROTATE_LEFT(head, tmp, oright);
                    tmp = RB_LEFT(parent);
                }

                RB_SET_COLOR(tmp, RB_COLOR(parent));
                RB_SET_COLOR(parent, EntryColor::Black);

                if (RB_LEFT(tmp)) {
                    RB_SET_COLOR(RB_LEFT(tmp), EntryColor::Black);
                }

                RB_ROTATE_RIGHT(head, parent, tmp);
                elm = head->Root();
                break;
            }
        }
    }

    if (elm) {
        RB_SET_COLOR(elm, EntryColor::Black);
    }
}
template <typename Node>
Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
    Node* child = nullptr;
    Node* parent = nullptr;
    Node* old = elm;
    EntryColor color{};

    const auto finalize = [&] {
        if (color == EntryColor::Black) {
            RB_REMOVE_COLOR(head, parent, child);
        }

        return old;
    };

    if (RB_LEFT(elm) == nullptr) {
        child = RB_RIGHT(elm);
    } else if (RB_RIGHT(elm) == nullptr) {
        child = RB_LEFT(elm);
    } else {
        Node* left;
        elm = RB_RIGHT(elm);
        while ((left = RB_LEFT(elm)) != nullptr) {
            elm = left;
        }

        child = RB_RIGHT(elm);
        parent = RB_PARENT(elm);
        color = RB_COLOR(elm);

        if (child) {
            RB_SET_PARENT(child, parent);
        }
        if (parent) {
            if (RB_LEFT(parent) == elm) {
                RB_SET_LEFT(parent, child);
            } else {
                RB_SET_RIGHT(parent, child);
            }
        } else {
            head->SetRoot(child);
        }

        if (RB_PARENT(elm) == old) {
            parent = elm;
        }

        elm->SetEntry(old->GetEntry());

        if (RB_PARENT(old)) {
            if (RB_LEFT(RB_PARENT(old)) == old) {
                RB_SET_LEFT(RB_PARENT(old), elm);
            } else {
                RB_SET_RIGHT(RB_PARENT(old), elm);
            }
        } else {
            head->SetRoot(elm);
        }
        RB_SET_PARENT(RB_LEFT(old), elm);
        if (RB_RIGHT(old)) {
            RB_SET_PARENT(RB_RIGHT(old), elm);
        }
        if (parent) {
            left = parent;
        }

        return finalize();
    }

    parent = RB_PARENT(elm);
    color = RB_COLOR(elm);

    if (child) {
        RB_SET_PARENT(child, parent);
    }
    if (parent) {
        if (RB_LEFT(parent) == elm) {
            RB_SET_LEFT(parent, child);
        } else {
            RB_SET_RIGHT(parent, child);
        }
    } else {
        head->SetRoot(child);
    }

    return finalize();
}
// Inserts a node into the RB tree
|
||||
template <typename Node, typename CompareFunction>
|
||||
Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
|
||||
Node* parent = nullptr;
|
||||
Node* tmp = head->Root();
|
||||
int comp = 0;
|
||||
|
||||
while (tmp) {
|
||||
parent = tmp;
|
||||
comp = cmp(elm, parent);
|
||||
if (comp < 0) {
|
||||
tmp = RB_LEFT(tmp);
|
||||
} else if (comp > 0) {
|
||||
tmp = RB_RIGHT(tmp);
|
||||
} else {
|
||||
return tmp;
|
        }
    }

    RB_SET(elm, parent);

    if (parent != nullptr) {
        if (comp < 0) {
            RB_SET_LEFT(parent, elm);
        } else {
            RB_SET_RIGHT(parent, elm);
        }
    } else {
        head->SetRoot(elm);
    }

    RB_INSERT_COLOR(head, elm);
    return nullptr;
}

// Finds the node with the same key as elm
template <typename Node, typename CompareFunction>
Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
    Node* tmp = head->Root();

    while (tmp) {
        const int comp = cmp(elm, tmp);
        if (comp < 0) {
            tmp = RB_LEFT(tmp);
        } else if (comp > 0) {
            tmp = RB_RIGHT(tmp);
        } else {
            return tmp;
        }
    }

    return nullptr;
}

// Finds the first node greater than or equal to the search key
template <typename Node, typename CompareFunction>
Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
    Node* tmp = head->Root();
    Node* res = nullptr;

    while (tmp) {
        const int comp = cmp(elm, tmp);
        if (comp < 0) {
            res = tmp;
            tmp = RB_LEFT(tmp);
        } else if (comp > 0) {
            tmp = RB_RIGHT(tmp);
        } else {
            return tmp;
        }
    }

    return res;
}

// Finds the node with the same key as lelm
template <typename Node, typename CompareFunction>
Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
    Node* tmp = head->Root();

    while (tmp) {
        const int comp = lcmp(lelm, tmp);
        if (comp < 0) {
            tmp = RB_LEFT(tmp);
        } else if (comp > 0) {
            tmp = RB_RIGHT(tmp);
        } else {
            return tmp;
        }
    }

    return nullptr;
}

// Finds the first node greater than or equal to the search key
template <typename Node, typename CompareFunction>
Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
    Node* tmp = head->Root();
    Node* res = nullptr;

    while (tmp) {
        const int comp = lcmp(lelm, tmp);
        if (comp < 0) {
            res = tmp;
            tmp = RB_LEFT(tmp);
        } else if (comp > 0) {
            tmp = RB_RIGHT(tmp);
        } else {
            return tmp;
        }
    }

    return res;
}

template <typename Node>
Node* RB_NEXT(Node* elm) {
    if (RB_RIGHT(elm)) {
        elm = RB_RIGHT(elm);
        while (RB_LEFT(elm)) {
            elm = RB_LEFT(elm);
        }
    } else {
        if (RB_PARENT(elm) && (elm == RB_LEFT(RB_PARENT(elm)))) {
            elm = RB_PARENT(elm);
        } else {
            while (RB_PARENT(elm) && (elm == RB_RIGHT(RB_PARENT(elm)))) {
                elm = RB_PARENT(elm);
            }
            elm = RB_PARENT(elm);
        }
    }
    return elm;
}

template <typename Node>
Node* RB_PREV(Node* elm) {
    if (RB_LEFT(elm)) {
        elm = RB_LEFT(elm);
        while (RB_RIGHT(elm)) {
            elm = RB_RIGHT(elm);
        }
    } else {
        if (RB_PARENT(elm) && (elm == RB_RIGHT(RB_PARENT(elm)))) {
            elm = RB_PARENT(elm);
        } else {
            while (RB_PARENT(elm) && (elm == RB_LEFT(RB_PARENT(elm)))) {
                elm = RB_PARENT(elm);
            }
            elm = RB_PARENT(elm);
        }
    }
    return elm;
}

template <typename Node>
Node* RB_MINMAX(RBHead<Node>* head, bool is_min) {
    Node* tmp = head->Root();
    Node* parent = nullptr;

    while (tmp) {
        parent = tmp;
        if (is_min) {
            tmp = RB_LEFT(tmp);
        } else {
            tmp = RB_RIGHT(tmp);
        }
    }

    return parent;
}

template <typename Node>
Node* RB_MIN(RBHead<Node>* head) {
    return RB_MINMAX(head, true);
}

template <typename Node>
Node* RB_MAX(RBHead<Node>* head) {
    return RB_MINMAX(head, false);
}
} // namespace Common
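Editor's sketch (illustration, not part of the diff): how these intrusive-tree helpers are typically driven. RBHead, RB_INSERT, RB_FIND and RB_NFIND come from the code above; the node type, its intrusive hook member (named RBEntry here as an assumption) and the comparator are hypothetical stand-ins.

namespace {
struct MyNode {
    Common::RBEntry<MyNode> entry; // assumed intrusive hook used by the helpers
    int key;
};

int Compare(const MyNode* lhs, const MyNode* rhs) {
    // The helpers expect a three-way result: <0, 0 or >0.
    return lhs->key - rhs->key;
}

void Example() {
    Common::RBHead<MyNode> head;
    MyNode node;
    node.key = 42;
    Common::RB_INSERT(&head, &node, Compare); // returns nullptr on success (see above)

    MyNode probe;
    probe.key = 42;
    MyNode* exact = Common::RB_FIND(&head, &probe, Compare);  // node with the same key
    MyNode* lower = Common::RB_NFIND(&head, &probe, Compare); // first node >= the key
}
} // namespace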
@@ -14,8 +14,8 @@ constexpr u128 INVALID_UUID{{0, 0}};

struct UUID {
    // UUIDs which are 0 are considered invalid!
    u128 uuid = INVALID_UUID;
    constexpr UUID() = default;
    u128 uuid;
    UUID() = default;
    constexpr explicit UUID(const u128& id) : uuid{id} {}
    constexpr explicit UUID(const u64 lo, const u64 hi) : uuid{{lo, hi}} {}


@@ -15,10 +15,12 @@ void FreeMemoryPages(void* base, std::size_t size) noexcept;
template <typename T>
class VirtualBuffer final {
public:
    static_assert(
        std::is_trivially_constructible_v<T>,
        "T must be trivially constructible, as non-trivial constructors will not be executed "
        "with the current allocator");
    // TODO: Uncomment this and change Common::PageTable::PageInfo to be trivially constructible
    // using std::atomic_ref once libc++ has support for it
    // static_assert(
    //     std::is_trivially_constructible_v<T>,
    //     "T must be trivially constructible, as non-trivial constructors will not be executed "
    //     "with the current allocator");

    constexpr VirtualBuffer() = default;
    explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} {

@@ -2,19 +2,74 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include <chrono>
#include <limits>
#include <mutex>
#include <thread>

#ifdef _MSC_VER
#include <intrin.h>

#pragma intrinsic(__umulh)
#pragma intrinsic(_udiv128)
#else
#include <x86intrin.h>
#endif

#include "common/atomic_ops.h"
#include "common/uint128.h"
#include "common/x64/native_clock.h"

namespace {

[[nodiscard]] u64 GetFixedPoint64Factor(u64 numerator, u64 divisor) {
#ifdef __SIZEOF_INT128__
    const auto base = static_cast<unsigned __int128>(numerator) << 64ULL;
    return static_cast<u64>(base / divisor);
#elif defined(_M_X64) || defined(_M_ARM64)
    std::array<u64, 2> r = {0, numerator};
    u64 remainder;
#if _MSC_VER < 1923
    return udiv128(r[1], r[0], divisor, &remainder);
#else
    return _udiv128(r[1], r[0], divisor, &remainder);
#endif
#else
    // This one is a bit more inaccurate.
    return MultiplyAndDivide64(std::numeric_limits<u64>::max(), numerator, divisor);
#endif
}

[[nodiscard]] u64 MultiplyHigh(u64 a, u64 b) {
#ifdef __SIZEOF_INT128__
    return (static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b)) >> 64;
#elif defined(_M_X64) || defined(_M_ARM64)
    return __umulh(a, b); // MSVC
#else
    // Generic fallback
    const u64 a_lo = u32(a);
    const u64 a_hi = a >> 32;
    const u64 b_lo = u32(b);
    const u64 b_hi = b >> 32;

    const u64 a_x_b_hi = a_hi * b_hi;
    const u64 a_x_b_mid = a_hi * b_lo;
    const u64 b_x_a_mid = b_hi * a_lo;
    const u64 a_x_b_lo = a_lo * b_lo;

    const u64 carry_bit = (static_cast<u64>(static_cast<u32>(a_x_b_mid)) +
                           static_cast<u64>(static_cast<u32>(b_x_a_mid)) + (a_x_b_lo >> 32)) >>
                          32;

    const u64 multhi = a_x_b_hi + (a_x_b_mid >> 32) + (b_x_a_mid >> 32) + carry_bit;

    return multhi;
#endif
}

} // namespace
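Editor's note on the fixed-point factors (illustration, not part of the diff): GetFixedPoint64Factor computes floor(2^64 * numerator / divisor), so taking the high 64 bits of ticks * factor recovers ticks * numerator / divisor with no per-call division. A self-contained sketch, assuming a GCC/Clang-style unsigned __int128; the concrete numbers are illustrative only:

#include <cstdint>

uint64_t ConvertTicks(uint64_t ticks, uint64_t factor) {
    // Same operation as MultiplyHigh above: the upper half of a 64x64 multiply.
    return static_cast<uint64_t>(
        (static_cast<unsigned __int128>(ticks) * factor) >> 64);
}

// Example: for a hypothetical 3 GHz invariant TSC,
//   factor = floor(2^64 * 1'000'000'000 / 3'000'000'000)
// and ConvertTicks(3'000'000'000, factor) is approximately 1'000'000'000,
// i.e. one second worth of TSC ticks converts to one second of nanoseconds.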

namespace Common {

u64 EstimateRDTSCFrequency() {
@@ -48,54 +103,71 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
    : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{
                                                                               rtsc_frequency_} {
    _mm_mfence();
    last_measure = __rdtsc();
    accumulated_ticks = 0U;
    time_point.inner.last_measure = __rdtsc();
    time_point.inner.accumulated_ticks = 0U;
    ns_rtsc_factor = GetFixedPoint64Factor(1000000000, rtsc_frequency);
    us_rtsc_factor = GetFixedPoint64Factor(1000000, rtsc_frequency);
    ms_rtsc_factor = GetFixedPoint64Factor(1000, rtsc_frequency);
    clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
    cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
}

u64 NativeClock::GetRTSC() {
    std::scoped_lock scope{rtsc_serialize};
    _mm_mfence();
    const u64 current_measure = __rdtsc();
    u64 diff = current_measure - last_measure;
    diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
    if (current_measure > last_measure) {
        last_measure = current_measure;
    }
    accumulated_ticks += diff;
    TimePoint new_time_point{};
    TimePoint current_time_point{};
    do {
        current_time_point.pack = time_point.pack;
        _mm_mfence();
        const u64 current_measure = __rdtsc();
        u64 diff = current_measure - current_time_point.inner.last_measure;
        diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
        new_time_point.inner.last_measure = current_measure > current_time_point.inner.last_measure
                                                ? current_measure
                                                : current_time_point.inner.last_measure;
        new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff;
    } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
                                           current_time_point.pack));
    /// The clock cannot be more precise than the guest timer, remove the lower bits
    return accumulated_ticks & inaccuracy_mask;
    return new_time_point.inner.accumulated_ticks & inaccuracy_mask;
}

void NativeClock::Pause(bool is_paused) {
    if (!is_paused) {
        _mm_mfence();
        last_measure = __rdtsc();
        TimePoint current_time_point{};
        TimePoint new_time_point{};
        do {
            current_time_point.pack = time_point.pack;
            new_time_point.pack = current_time_point.pack;
            _mm_mfence();
            new_time_point.inner.last_measure = __rdtsc();
        } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
                                               current_time_point.pack));
    }
}

std::chrono::nanoseconds NativeClock::GetTimeNS() {
    const u64 rtsc_value = GetRTSC();
    return std::chrono::nanoseconds{MultiplyAndDivide64(rtsc_value, 1000000000, rtsc_frequency)};
    return std::chrono::nanoseconds{MultiplyHigh(rtsc_value, ns_rtsc_factor)};
}

std::chrono::microseconds NativeClock::GetTimeUS() {
    const u64 rtsc_value = GetRTSC();
    return std::chrono::microseconds{MultiplyAndDivide64(rtsc_value, 1000000, rtsc_frequency)};
    return std::chrono::microseconds{MultiplyHigh(rtsc_value, us_rtsc_factor)};
}

std::chrono::milliseconds NativeClock::GetTimeMS() {
    const u64 rtsc_value = GetRTSC();
    return std::chrono::milliseconds{MultiplyAndDivide64(rtsc_value, 1000, rtsc_frequency)};
    return std::chrono::milliseconds{MultiplyHigh(rtsc_value, ms_rtsc_factor)};
}

u64 NativeClock::GetClockCycles() {
    const u64 rtsc_value = GetRTSC();
    return MultiplyAndDivide64(rtsc_value, emulated_clock_frequency, rtsc_frequency);
    return MultiplyHigh(rtsc_value, clock_rtsc_factor);
}

u64 NativeClock::GetCPUCycles() {
    const u64 rtsc_value = GetRTSC();
    return MultiplyAndDivide64(rtsc_value, emulated_cpu_frequency, rtsc_frequency);
    return MultiplyHigh(rtsc_value, cpu_rtsc_factor);
}

} // namespace X64
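Editor's note (illustration, not part of the diff): two details of the new GetRTSC are worth spelling out. First, the update is now lock-free: the 16-byte TimePoint is snapshotted, a successor value is derived from the snapshot, and Common::AtomicCompareAndSwap only commits it if no other thread won the race in between, retrying otherwise. Second, the branch-free clamp applied to diff:

u64 ClampToZero(u64 diff) {
    // For a negative two's-complement value, (s64)diff >> 63 is all ones, so
    // the complemented mask is zero and the result is 0; for a non-negative
    // value the mask is zero, its complement is all ones, and diff passes
    // through unchanged.
    return diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
}
// ClampToZero(5) == 5; ClampToZero(static_cast<u64>(-3)) == 0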

@@ -6,7 +6,6 @@

#include <optional>

#include "common/spin_lock.h"
#include "common/wall_clock.h"

namespace Common {
@@ -32,14 +31,28 @@ public:
private:
    u64 GetRTSC();

    union alignas(16) TimePoint {
        TimePoint() : pack{} {}
        u128 pack{};
        struct Inner {
            u64 last_measure{};
            u64 accumulated_ticks{};
        } inner;
    };

    /// Value used to reduce the native clock's accuracy, as some apps rely on
    /// undefined behavior where the level of accuracy in the clock shouldn't
    /// be higher.
    static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1);

    SpinLock rtsc_serialize{};
    u64 last_measure{};
    u64 accumulated_ticks{};
    TimePoint time_point;
    // factors
    u64 clock_rtsc_factor{};
    u64 cpu_rtsc_factor{};
    u64 ns_rtsc_factor{};
    u64 us_rtsc_factor{};
    u64 ms_rtsc_factor{};

    u64 rtsc_frequency;
};
} // namespace X64
@@ -142,8 +142,6 @@ add_library(core STATIC
    hardware_interrupt_manager.h
    hle/ipc.h
    hle/ipc_helpers.h
    hle/kernel/address_arbiter.cpp
    hle/kernel/address_arbiter.h
    hle/kernel/client_port.cpp
    hle/kernel/client_port.h
    hle/kernel/client_session.cpp
@@ -157,13 +155,33 @@ add_library(core STATIC
    hle/kernel/handle_table.h
    hle/kernel/hle_ipc.cpp
    hle/kernel/hle_ipc.h
    hle/kernel/k_address_arbiter.cpp
    hle/kernel/k_address_arbiter.h
    hle/kernel/k_affinity_mask.h
    hle/kernel/k_condition_variable.cpp
    hle/kernel/k_condition_variable.h
    hle/kernel/k_event.cpp
    hle/kernel/k_event.h
    hle/kernel/k_light_condition_variable.h
    hle/kernel/k_light_lock.cpp
    hle/kernel/k_light_lock.h
    hle/kernel/k_priority_queue.h
    hle/kernel/k_readable_event.cpp
    hle/kernel/k_readable_event.h
    hle/kernel/k_resource_limit.cpp
    hle/kernel/k_resource_limit.h
    hle/kernel/k_scheduler.cpp
    hle/kernel/k_scheduler.h
    hle/kernel/k_scheduler_lock.h
    hle/kernel/k_scoped_lock.h
    hle/kernel/k_scoped_scheduler_lock_and_sleep.h
    hle/kernel/k_synchronization_object.cpp
    hle/kernel/k_synchronization_object.h
    hle/kernel/k_thread.cpp
    hle/kernel/k_thread.h
    hle/kernel/k_thread_queue.h
    hle/kernel/k_writable_event.cpp
    hle/kernel/k_writable_event.h
    hle/kernel/kernel.cpp
    hle/kernel/kernel.h
    hle/kernel/memory/address_space_info.cpp
@@ -183,8 +201,6 @@ add_library(core STATIC
    hle/kernel/memory/slab_heap.h
    hle/kernel/memory/system_control.cpp
    hle/kernel/memory/system_control.h
    hle/kernel/mutex.cpp
    hle/kernel/mutex.h
    hle/kernel/object.cpp
    hle/kernel/object.h
    hle/kernel/physical_core.cpp
@@ -194,10 +210,6 @@ add_library(core STATIC
    hle/kernel/process.h
    hle/kernel/process_capability.cpp
    hle/kernel/process_capability.h
    hle/kernel/readable_event.cpp
    hle/kernel/readable_event.h
    hle/kernel/resource_limit.cpp
    hle/kernel/resource_limit.h
    hle/kernel/server_port.cpp
    hle/kernel/server_port.h
    hle/kernel/server_session.cpp
@@ -210,20 +222,14 @@ add_library(core STATIC
    hle/kernel/shared_memory.h
    hle/kernel/svc.cpp
    hle/kernel/svc.h
    hle/kernel/svc_common.h
    hle/kernel/svc_results.h
    hle/kernel/svc_types.h
    hle/kernel/svc_wrap.h
    hle/kernel/synchronization_object.cpp
    hle/kernel/synchronization_object.h
    hle/kernel/synchronization.cpp
    hle/kernel/synchronization.h
    hle/kernel/thread.cpp
    hle/kernel/thread.h
    hle/kernel/time_manager.cpp
    hle/kernel/time_manager.h
    hle/kernel/transfer_memory.cpp
    hle/kernel/transfer_memory.h
    hle/kernel/writable_event.cpp
    hle/kernel/writable_event.h
    hle/lock.cpp
    hle/lock.h
    hle/result.h
@@ -400,8 +406,6 @@ add_library(core STATIC
    hle/service/ldr/ldr.h
    hle/service/lm/lm.cpp
    hle/service/lm/lm.h
    hle/service/lm/manager.cpp
    hle/service/lm/manager.h
    hle/service/mig/mig.cpp
    hle/service/mig/mig.h
    hle/service/mii/manager.cpp
@@ -635,16 +639,17 @@ if (MSVC)
        /we4267
        # 'context' : truncation from 'type1' to 'type2'
        /we4305
        # 'function' : not all control paths return a value
        /we4715
    )
else()
    target_compile_options(core PRIVATE
        -Werror=conversion
        -Werror=ignored-qualifiers
        -Werror=implicit-fallthrough
        -Werror=reorder
        -Werror=sign-compare
        -Werror=unused-variable

        $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>

@@ -26,9 +26,10 @@ using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CO
/// Generic ARMv8 CPU interface
class ARM_Interface : NonCopyable {
public:
    explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers, bool uses_wall_clock)
        : system{system_}, interrupt_handlers{interrupt_handlers}, uses_wall_clock{
                                                                       uses_wall_clock} {}
    explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers_,
                           bool uses_wall_clock_)
        : system{system_}, interrupt_handlers{interrupt_handlers_}, uses_wall_clock{
                                                                        uses_wall_clock_} {}
    virtual ~ARM_Interface() = default;

    struct ThreadContext32 {

@@ -71,15 +71,9 @@ public:
    }

    void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
        switch (exception) {
        case Dynarmic::A32::Exception::UndefinedInstruction:
        case Dynarmic::A32::Exception::UnpredictableInstruction:
            break;
        case Dynarmic::A32::Exception::Breakpoint:
            break;
        }
        LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
                     static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
        LOG_CRITICAL(Core_ARM,
                     "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
                     exception, pc, MemoryReadCode(pc), parent.IsInThumbMode());
        UNIMPLEMENTED();
    }

@@ -133,6 +127,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
    config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
        page_table.pointers.data());
    config.absolute_offset_page_table = true;
    config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
    config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
    config.only_detect_misalignment_via_page_table_on_page_boundary = true;

@@ -180,6 +175,9 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
    if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
        config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
    }
    if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
        config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
    }
    }

    return std::make_unique<Dynarmic::A32::Jit>(config);
@@ -258,6 +256,9 @@ void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
}

void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
    if (!jit) {
        return;
    }
    Dynarmic::A32::Context context;
    jit->SaveContext(context);
    ctx.cpu_registers = context.Regs();
@@ -267,6 +268,9 @@ void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
}

void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
    if (!jit) {
        return;
    }
    Dynarmic::A32::Context context;
    context.Regs() = ctx.cpu_registers;
    context.ExtRegs() = ctx.extension_registers;

@@ -50,6 +50,10 @@ public:
    u64 GetTPIDR_EL0() const override;
    void ChangeProcessorID(std::size_t new_core_id) override;

    bool IsInThumbMode() const {
        return (GetPSTATE() & 0x20) != 0;
    }

    void SaveContext(ThreadContext32& ctx) override;
    void SaveContext(ThreadContext64& ctx) override {}
    void LoadContext(const ThreadContext32& ctx) override;

@@ -152,6 +152,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
    // Memory
    config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
    config.page_table_address_space_bits = address_space_bits;
    config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
    config.silently_mirror_page_table = false;
    config.absolute_offset_page_table = true;
    config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
@@ -211,6 +212,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
    if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
        config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
    }
    if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
        config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
    }
    }

    return std::make_shared<Dynarmic::A64::Jit>(config);
@@ -290,6 +294,9 @@ void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
}

void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
    if (!jit) {
        return;
    }
    ctx.cpu_registers = jit->GetRegisters();
    ctx.sp = jit->GetSP();
    ctx.pc = jit->GetPC();
@@ -301,6 +308,9 @@ void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
}

void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
    if (!jit) {
        return;
    }
    jit->SetRegisters(ctx.cpu_registers);
    jit->SetSP(ctx.sp);
    jit->SetPC(ctx.pc);

@@ -28,15 +28,14 @@
#include "core/hardware_interrupt_manager.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/am/applets/applets.h"
#include "core/hle/service/apm/controller.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/glue/manager.h"
#include "core/hle/service/lm/manager.h"
#include "core/hle/service/service.h"
#include "core/hle/service/sm/sm.h"
#include "core/hle/service/time/time_manager.h"
@@ -293,8 +292,6 @@ struct System::Impl {
                 perf_stats->GetMeanFrametime());
    }

    lm_manager.Flush();

    is_powered_on = false;
    exit_lock = false;

@@ -398,7 +395,6 @@ struct System::Impl {

    /// Service State
    Service::Glue::ARPManager arp_manager;
    Service::LM::Manager lm_manager{reporter};
    Service::Time::TimeManager time_manager;

    /// Service manager
@@ -720,14 +716,6 @@ const Service::APM::Controller& System::GetAPMController() const {
    return impl->apm_controller;
}

Service::LM::Manager& System::GetLogManager() {
    return impl->lm_manager;
}

const Service::LM::Manager& System::GetLogManager() const {
    return impl->lm_manager;
}

Service::Time::TimeManager& System::GetTimeManager() {
    return impl->time_manager;
}

@@ -62,10 +62,6 @@ namespace Glue {
class ARPManager;
}

namespace LM {
class Manager;
} // namespace LM

namespace SM {
class ServiceManager;
} // namespace SM
@@ -351,9 +347,6 @@ public:
    [[nodiscard]] Service::APM::Controller& GetAPMController();
    [[nodiscard]] const Service::APM::Controller& GetAPMController() const;

    [[nodiscard]] Service::LM::Manager& GetLogManager();
    [[nodiscard]] const Service::LM::Manager& GetLogManager() const;

    [[nodiscard]] Service::Time::TimeManager& GetTimeManager();
    [[nodiscard]] const Service::Time::TimeManager& GetTimeManager() const;

@@ -49,6 +49,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
    Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
    instance.on_thread_init();
    instance.ThreadLoop();
    MicroProfileOnThreadExit();
}

void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {

@@ -11,9 +11,9 @@
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/thread.h"
#include "video_core/gpu.h"

namespace Core {
@@ -147,7 +147,7 @@ void CpuManager::MultiCoreRunSuspendThread() {
    while (true) {
        auto core = kernel.GetCurrentHostThreadID();
        auto& scheduler = *kernel.CurrentScheduler();
        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
        Kernel::KThread* current_thread = scheduler.GetCurrentThread();
        Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
        ASSERT(scheduler.ContextSwitchPending());
        ASSERT(core == kernel.GetCurrentHostThreadID());
@@ -208,7 +208,6 @@ void CpuManager::SingleCoreRunGuestThread() {

void CpuManager::SingleCoreRunGuestLoop() {
    auto& kernel = system.Kernel();
    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
    while (true) {
        auto* physical_core = &kernel.CurrentPhysicalCore();
        system.EnterDynarmicProfile();
@@ -217,9 +216,9 @@ void CpuManager::SingleCoreRunGuestLoop() {
            physical_core = &kernel.CurrentPhysicalCore();
        }
        system.ExitDynarmicProfile();
        thread->SetPhantomMode(true);
        kernel.SetIsPhantomModeForSingleCore(true);
        system.CoreTiming().Advance();
        thread->SetPhantomMode(false);
        kernel.SetIsPhantomModeForSingleCore(false);
        physical_core->ArmInterface().ClearExclusiveState();
        PreemptSingleCore();
        auto& scheduler = kernel.Scheduler(current_core);
@@ -245,7 +244,7 @@ void CpuManager::SingleCoreRunSuspendThread() {
    while (true) {
        auto core = kernel.GetCurrentHostThreadID();
        auto& scheduler = *kernel.CurrentScheduler();
        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
        Kernel::KThread* current_thread = scheduler.GetCurrentThread();
        Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
        ASSERT(scheduler.ContextSwitchPending());
        ASSERT(core == kernel.GetCurrentHostThreadID());
@@ -255,22 +254,23 @@ void CpuManager::SingleCoreRunSuspendThread() {

void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
    {
        auto& scheduler = system.Kernel().Scheduler(current_core);
        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
        auto& kernel = system.Kernel();
        auto& scheduler = kernel.Scheduler(current_core);
        Kernel::KThread* current_thread = scheduler.GetCurrentThread();
        if (idle_count >= 4 || from_running_enviroment) {
            if (!from_running_enviroment) {
                system.CoreTiming().Idle();
                idle_count = 0;
            }
            current_thread->SetPhantomMode(true);
            kernel.SetIsPhantomModeForSingleCore(true);
            system.CoreTiming().Advance();
            current_thread->SetPhantomMode(false);
            kernel.SetIsPhantomModeForSingleCore(false);
        }
        current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
        system.CoreTiming().ResetTicks();
        scheduler.Unload(scheduler.GetCurrentThread());

        auto& next_scheduler = system.Kernel().Scheduler(current_core);
        auto& next_scheduler = kernel.Scheduler(current_core);
        Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
    }

@@ -278,8 +278,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
    {
        auto& scheduler = system.Kernel().Scheduler(current_core);
        scheduler.Reload(scheduler.GetCurrentThread());
        auto* currrent_thread2 = scheduler.GetCurrentThread();
        if (!currrent_thread2->IsIdleThread()) {
        if (!scheduler.IsIdle()) {
            idle_count = 0;
        }
    }

@@ -143,6 +143,7 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
        return 0x3C;
    }
    UNREACHABLE();
    return 0;
}

u64 GetSignatureTypePaddingSize(SignatureType type) {
@@ -157,6 +158,7 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
        return 0x40;
    }
    UNREACHABLE();
    return 0;
}

SignatureType Ticket::GetSignatureType() const {
@@ -169,8 +171,7 @@ SignatureType Ticket::GetSignatureType() const {
    if (const auto* ticket = std::get_if<ECDSATicket>(&data)) {
        return ticket->sig_type;
    }

    UNREACHABLE();
    throw std::bad_variant_access{};
}

TicketData& Ticket::GetData() {
@@ -183,8 +184,7 @@ TicketData& Ticket::GetData() {
    if (auto* ticket = std::get_if<ECDSATicket>(&data)) {
        return ticket->data;
    }

    UNREACHABLE();
    throw std::bad_variant_access{};
}

const TicketData& Ticket::GetData() const {
@@ -197,8 +197,7 @@ const TicketData& Ticket::GetData() const {
    if (const auto* ticket = std::get_if<ECDSATicket>(&data)) {
        return ticket->data;
    }

    UNREACHABLE();
    throw std::bad_variant_access{};
}

u64 Ticket::GetSize() const {
@@ -569,6 +568,11 @@ KeyManager::KeyManager() {
    // Initialize keys
    const std::string hactool_keys_dir = Common::FS::GetHactoolConfigurationPath();
    const std::string yuzu_keys_dir = Common::FS::GetUserPath(Common::FS::UserPath::KeysDir);

    if (!Common::FS::Exists(yuzu_keys_dir)) {
        Common::FS::CreateDir(yuzu_keys_dir);
    }

    if (Settings::values.use_dev_keys) {
        dev_mode = true;
        AttemptLoadKeyFile(yuzu_keys_dir, hactool_keys_dir, "dev.keys", false);

@@ -43,17 +43,17 @@ static_assert(sizeof(IVFCLevel) == 0x18, "IVFCLevel has incorrect size.");
struct IVFCHeader {
    u32_le magic;
    u32_le magic_number;
    INSERT_UNION_PADDING_BYTES(8);
    INSERT_PADDING_BYTES_NOINIT(8);
    std::array<IVFCLevel, 6> levels;
    INSERT_UNION_PADDING_BYTES(64);
    INSERT_PADDING_BYTES_NOINIT(64);
};
static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size.");

struct NCASectionHeaderBlock {
    INSERT_UNION_PADDING_BYTES(3);
    INSERT_PADDING_BYTES_NOINIT(3);
    NCASectionFilesystemType filesystem_type;
    NCASectionCryptoType crypto_type;
    INSERT_UNION_PADDING_BYTES(3);
    INSERT_PADDING_BYTES_NOINIT(3);
};
static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size.");

@@ -61,7 +61,7 @@ struct NCASectionRaw {
    NCASectionHeaderBlock header;
    std::array<u8, 0x138> block_data;
    std::array<u8, 0x8> section_ctr;
    INSERT_UNION_PADDING_BYTES(0xB8);
    INSERT_PADDING_BYTES_NOINIT(0xB8);
};
static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size.");

@@ -69,19 +69,19 @@ struct PFS0Superblock {
    NCASectionHeaderBlock header_block;
    std::array<u8, 0x20> hash;
    u32_le size;
    INSERT_UNION_PADDING_BYTES(4);
    INSERT_PADDING_BYTES_NOINIT(4);
    u64_le hash_table_offset;
    u64_le hash_table_size;
    u64_le pfs0_header_offset;
    u64_le pfs0_size;
    INSERT_UNION_PADDING_BYTES(0x1B0);
    INSERT_PADDING_BYTES_NOINIT(0x1B0);
};
static_assert(sizeof(PFS0Superblock) == 0x200, "PFS0Superblock has incorrect size.");

struct RomFSSuperblock {
    NCASectionHeaderBlock header_block;
    IVFCHeader ivfc;
    INSERT_UNION_PADDING_BYTES(0x118);
    INSERT_PADDING_BYTES_NOINIT(0x118);
};
static_assert(sizeof(RomFSSuperblock) == 0x200, "RomFSSuperblock has incorrect size.");

@@ -89,19 +89,19 @@ struct BKTRHeader {
    u64_le offset;
    u64_le size;
    u32_le magic;
    INSERT_UNION_PADDING_BYTES(0x4);
    INSERT_PADDING_BYTES_NOINIT(0x4);
    u32_le number_entries;
    INSERT_UNION_PADDING_BYTES(0x4);
    INSERT_PADDING_BYTES_NOINIT(0x4);
};
static_assert(sizeof(BKTRHeader) == 0x20, "BKTRHeader has incorrect size.");

struct BKTRSuperblock {
    NCASectionHeaderBlock header_block;
    IVFCHeader ivfc;
    INSERT_UNION_PADDING_BYTES(0x18);
    INSERT_PADDING_BYTES_NOINIT(0x18);
    BKTRHeader relocation;
    BKTRHeader subsection;
    INSERT_UNION_PADDING_BYTES(0xC0);
    INSERT_PADDING_BYTES_NOINIT(0xC0);
};
static_assert(sizeof(BKTRSuperblock) == 0x200, "BKTRSuperblock has incorrect size.");


@@ -51,8 +51,8 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
            low = mid + 1;
        }
    }

    UNREACHABLE_MSG("Offset could not be found in BKTR block.");
    return {0, 0};
}
} // Anonymous namespace


@@ -105,7 +105,8 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
        // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
        return ContentRecordType::HtmlDocument;
    default:
        UNREACHABLE_MSG("Invalid NCAContentType={:02X}", static_cast<u8>(type));
        UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type);
        return ContentRecordType{};
    }
}

@@ -67,18 +67,18 @@ public:
    virtual void Refresh() = 0;

    virtual bool HasEntry(u64 title_id, ContentRecordType type) const = 0;
    virtual bool HasEntry(ContentProviderEntry entry) const;
    bool HasEntry(ContentProviderEntry entry) const;

    virtual std::optional<u32> GetEntryVersion(u64 title_id) const = 0;

    virtual VirtualFile GetEntryUnparsed(u64 title_id, ContentRecordType type) const = 0;
    virtual VirtualFile GetEntryUnparsed(ContentProviderEntry entry) const;
    VirtualFile GetEntryUnparsed(ContentProviderEntry entry) const;

    virtual VirtualFile GetEntryRaw(u64 title_id, ContentRecordType type) const = 0;
    virtual VirtualFile GetEntryRaw(ContentProviderEntry entry) const;
    VirtualFile GetEntryRaw(ContentProviderEntry entry) const;

    virtual std::unique_ptr<NCA> GetEntry(u64 title_id, ContentRecordType type) const = 0;
    virtual std::unique_ptr<NCA> GetEntry(ContentProviderEntry entry) const;
    std::unique_ptr<NCA> GetEntry(ContentProviderEntry entry) const;

    virtual std::vector<ContentProviderEntry> ListEntries() const;


@@ -58,7 +58,7 @@ struct SaveDataAttribute {
    SaveDataType type;
    SaveDataRank rank;
    u16 index;
    INSERT_PADDING_BYTES(4);
    INSERT_PADDING_BYTES_NOINIT(4);
    u64 zero_1;
    u64 zero_2;
    u64 zero_3;
@@ -72,7 +72,7 @@ struct SaveDataExtraData {
    u64 owner_id;
    s64 timestamp;
    SaveDataFlags flags;
    INSERT_PADDING_BYTES(4);
    INSERT_PADDING_BYTES_NOINIT(4);
    s64 available_size;
    s64 journal_size;
    s64 commit_id;

@@ -133,8 +133,11 @@ VirtualFile RealVfsFilesystem::MoveFile(std::string_view old_path_, std::string_
        }

        cache.erase(old_path);
        file->Open(new_path, "r+b");
        cache.insert_or_assign(new_path, std::move(file));
        if (file->Open(new_path, "r+b")) {
            cache.insert_or_assign(new_path, std::move(file));
        } else {
            LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", new_path);
        }
    } else {
        UNREACHABLE();
        return nullptr;
@@ -214,9 +217,12 @@ VirtualDir RealVfsFilesystem::MoveDirectory(std::string_view old_path_,
        }

        auto file = cached.lock();
        file->Open(file_new_path, "r+b");
        cache.erase(file_old_path);
        cache.insert_or_assign(std::move(file_new_path), std::move(file));
        if (file->Open(file_new_path, "r+b")) {
            cache.insert_or_assign(std::move(file_new_path), std::move(file));
        } else {
            LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", file_new_path);
        }
    }

    return OpenDirectory(new_path, Mode::ReadWrite);

@@ -21,21 +21,18 @@ public:

    std::mutex mutex;

    bool touch_pressed = false; ///< True if touchpad area is currently pressed, otherwise false

    float touch_x = 0.0f; ///< Touchpad X-position
    float touch_y = 0.0f; ///< Touchpad Y-position
    Input::TouchStatus status;

private:
    class Device : public Input::TouchDevice {
    public:
        explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {}
        std::tuple<float, float, bool> GetStatus() const override {
        Input::TouchStatus GetStatus() const override {
            if (auto state = touch_state.lock()) {
                std::lock_guard guard{state->mutex};
                return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed);
                return state->status;
            }
            return std::make_tuple(0.0f, 0.0f, false);
            return {};
        }

    private:
@@ -79,36 +76,44 @@ std::tuple<unsigned, unsigned> EmuWindow::ClipToTouchScreen(unsigned new_x, unsi
    return std::make_tuple(new_x, new_y);
}

void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
    if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id) {
    if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) {
        return;
    }
    if (id >= touch_state->status.size()) {
        return;
    }

    std::lock_guard guard{touch_state->mutex};
    touch_state->touch_x =
    const float x =
        static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) /
        static_cast<float>(framebuffer_layout.screen.right - framebuffer_layout.screen.left);
    touch_state->touch_y =
    const float y =
        static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) /
        static_cast<float>(framebuffer_layout.screen.bottom - framebuffer_layout.screen.top);

    touch_state->touch_pressed = true;
    touch_state->status[id] = std::make_tuple(x, y, true);
}

void EmuWindow::TouchReleased() {
void EmuWindow::TouchReleased(std::size_t id) {
    if (id >= touch_state->status.size()) {
        return;
    }
    std::lock_guard guard{touch_state->mutex};
    touch_state->touch_pressed = false;
    touch_state->touch_x = 0;
    touch_state->touch_y = 0;
    touch_state->status[id] = std::make_tuple(0.0f, 0.0f, false);
}

void EmuWindow::TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y) {
    if (!touch_state->touch_pressed)
void EmuWindow::TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id) {
    if (id >= touch_state->status.size()) {
        return;
    }
    if (!std::get<2>(touch_state->status[id]))
        return;

    if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
        std::tie(framebuffer_x, framebuffer_y) = ClipToTouchScreen(framebuffer_x, framebuffer_y);

    TouchPressed(framebuffer_x, framebuffer_y);
    TouchPressed(framebuffer_x, framebuffer_y, id);
}

void EmuWindow::UpdateCurrentFramebufferLayout(unsigned width, unsigned height) {

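Editor's note (illustration, not part of the diff): the coordinate normalization above, extracted into a standalone helper. With a screen rectangle spanning left = 100 and right = 1380, a press at framebuffer_x = 740 yields (740 - 100) / 1280 = 0.5, the horizontal midpoint; y is handled identically, so the stored coordinates are independent of window size:

float NormalizeTouchX(unsigned framebuffer_x, unsigned left, unsigned right) {
    return static_cast<float>(framebuffer_x - left) /
           static_cast<float>(right - left);
}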
@@ -117,18 +117,23 @@ public:
     * Signal that a touch pressed event has occurred (e.g. mouse click pressed)
     * @param framebuffer_x Framebuffer x-coordinate that was pressed
     * @param framebuffer_y Framebuffer y-coordinate that was pressed
     * @param id Touch event ID
     */
    void TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y);
    void TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id);

    /// Signal that a touch released event has occurred (e.g. mouse click released)
    void TouchReleased();
    /**
     * Signal that a touch released event has occurred (e.g. mouse click released)
     * @param id Touch event ID
     */
    void TouchReleased(std::size_t id);

    /**
     * Signal that a touch movement event has occurred (e.g. mouse was moved over the emu window)
     * @param framebuffer_x Framebuffer x-coordinate
     * @param framebuffer_y Framebuffer y-coordinate
     * @param id Touch event ID
     */
    void TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y);
    void TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id);

    /**
     * Returns currently active configuration.

@@ -163,10 +163,11 @@ using MotionStatus = std::tuple<Common::Vec3<float>, Common::Vec3<float>, Common
using MotionDevice = InputDevice<MotionStatus>;

/**
 * A touch status is an object that returns a tuple of two floats and a bool. The floats are
 * x and y coordinates in the range 0.0 - 1.0, and the bool indicates whether it is pressed.
 * A touch status is an object that returns an array of 16 tuple elements of two floats and a bool.
 * The floats are x and y coordinates in the range 0.0 - 1.0, and the bool indicates whether it is
 * pressed.
 */
using TouchStatus = std::tuple<float, float, bool>;
using TouchStatus = std::array<std::tuple<float, float, bool>, 16>;

/**
 * A touch device is an input device that returns a touch status object

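Editor's sketch (not part of the diff) of consuming the new 16-slot TouchStatus; each slot tracks one finger, and std::get<2> of a slot is its pressed flag. The DumpTouches name and the logging call are illustrative only:

void DumpTouches(const Input::TouchDevice& device) {
    const Input::TouchStatus status = device.GetStatus();
    for (std::size_t id = 0; id < status.size(); ++id) {
        const auto [x, y, pressed] = status[id];
        if (pressed) {
            // x and y are already normalized to the 0.0 - 1.0 range.
            LOG_DEBUG(Input, "touch {} at ({}, {})", id, x, y);
        }
    }
}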
@@ -25,6 +25,10 @@ void InputInterpreter::PollInput() {
    button_states[current_index] = button_state;
}

bool InputInterpreter::IsButtonPressed(HIDButton button) const {
    return (button_states[current_index] & (1U << static_cast<u8>(button))) != 0;
}

bool InputInterpreter::IsButtonPressedOnce(HIDButton button) const {
    const bool current_press =
        (button_states[current_index] & (1U << static_cast<u8>(button))) != 0;

@@ -66,6 +66,27 @@ public:
    /// Gets a button state from HID and inserts it into the array of button states.
    void PollInput();

    /**
     * Checks whether the button is pressed.
     *
     * @param button The button to check.
     *
     * @returns True when the button is pressed.
     */
    [[nodiscard]] bool IsButtonPressed(HIDButton button) const;

    /**
     * Checks whether any of the buttons in the parameter list is pressed.
     *
     * @tparam HIDButton The buttons to check.
     *
     * @returns True when at least one of the buttons is pressed.
     */
    template <HIDButton... T>
    [[nodiscard]] bool IsAnyButtonPressed() {
        return (IsButtonPressed(T) || ...);
    }

    /**
     * The specified button is considered to be pressed once
     * if it is currently pressed and not pressed previously.
@@ -79,12 +100,12 @@ public:
    /**
     * Checks whether any of the buttons in the parameter list is pressed once.
     *
     * @tparam HIDButton The buttons to check.
     * @tparam T The buttons to check.
     *
     * @returns True when at least one of the buttons is pressed once.
     */
    template <HIDButton... T>
    [[nodiscard]] bool IsAnyButtonPressedOnce() {
    [[nodiscard]] bool IsAnyButtonPressedOnce() const {
        return (IsButtonPressedOnce(T) || ...);
    }

@@ -100,12 +121,12 @@ public:
    /**
     * Checks whether any of the buttons in the parameter list is held down.
     *
     * @tparam HIDButton The buttons to check.
     * @tparam T The buttons to check.
     *
     * @returns True when at least one of the buttons is held down.
     */
    template <HIDButton... T>
    [[nodiscard]] bool IsAnyButtonHeld() {
    [[nodiscard]] bool IsAnyButtonHeld() const {
        return (IsButtonHeld(T) || ...);
    }


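Editor's note (illustration, not part of the diff): the C++17 fold expression above expands at compile time. A call such as

    interpreter.IsAnyButtonPressed<HIDButton::A, HIDButton::B, HIDButton::X>();

is equivalent to

    IsButtonPressed(HIDButton::A) || IsButtonPressed(HIDButton::B) ||
    IsButtonPressed(HIDButton::X);

so the check short-circuits on the first button found pressed.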
@@ -4,8 +4,10 @@

#pragma once

#include <array>
#include <tuple>

#include "common/bit_util.h"
#include "common/common_types.h"

namespace Core {
@@ -18,34 +20,12 @@ constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch cpu frequency is 1020MHz u
constexpr u64 CNTFREQ = 19200000; // Switch's hardware clock speed
constexpr u32 NUM_CPU_CORES = 4;  // Number of CPU Cores

} // namespace Hardware

constexpr u32 INVALID_HOST_THREAD_ID = 0xFFFFFFFF;

struct EmuThreadHandle {
    u32 host_handle;
    u32 guest_handle;

    u64 GetRaw() const {
        return (static_cast<u64>(host_handle) << 32) | guest_handle;
    }

    bool operator==(const EmuThreadHandle& rhs) const {
        return std::tie(host_handle, guest_handle) == std::tie(rhs.host_handle, rhs.guest_handle);
    }

    bool operator!=(const EmuThreadHandle& rhs) const {
        return !operator==(rhs);
    }

    static constexpr EmuThreadHandle InvalidHandle() {
        constexpr u32 invalid_handle = 0xFFFFFFFF;
        return {invalid_handle, invalid_handle};
    }

    bool IsInvalid() const {
        return (*this) == InvalidHandle();
    }
// Virtual to Physical core map.
constexpr std::array<s32, Common::BitSize<u64>()> VirtualToPhysicalCoreMap{
    0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
};

} // namespace Hardware

} // namespace Core

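Editor's sketch (not part of the diff): looking up a physical core with the table above. Virtual cores 0-3 map one-to-one, nearly every other value collapses to core 0, and the final entry (63) maps to core 3. The masking here is an illustrative safeguard, on the assumption that callers pass a value below Common::BitSize<u64>():

s32 GetPhysicalCore(u64 virtual_core) {
    return Core::Hardware::VirtualToPhysicalCoreMap[virtual_core & 63];
}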
@@ -146,7 +146,7 @@ static_assert(sizeof(BufferDescriptorC) == 8, "BufferDescriptorC size is incorre

struct DataPayloadHeader {
    u32_le magic;
    INSERT_PADDING_WORDS(1);
    INSERT_PADDING_WORDS_NOINIT(1);
};
static_assert(sizeof(DataPayloadHeader) == 8, "DataPayloadHeader size is incorrect");

@@ -160,7 +160,7 @@ struct DomainMessageHeader {
    // Used when responding to an IPC request, Server -> Client.
    struct {
        u32_le num_objects;
        INSERT_UNION_PADDING_WORDS(3);
        INSERT_PADDING_WORDS_NOINIT(3);
    };

    // Used when performing an IPC request, Client -> Server.
@@ -171,10 +171,10 @@ struct DomainMessageHeader {
            BitField<16, 16, u32> size;
        };
        u32_le object_id;
        INSERT_UNION_PADDING_WORDS(2);
        INSERT_PADDING_WORDS_NOINIT(2);
    };

    std::array<u32, 4> raw{};
    std::array<u32, 4> raw;
};
};
static_assert(sizeof(DomainMessageHeader) == 16, "DomainMessageHeader size is incorrect");

@@ -1,317 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <vector>

#include "common/assert.h"
#include "common/common_types.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

// Wake up num_to_wake (or all) threads in a vector.
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
                                 s32 num_to_wake) {
    // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
    // them all.
    std::size_t last = waiting_threads.size();
    if (num_to_wake > 0) {
        last = std::min(last, static_cast<std::size_t>(num_to_wake));
    }

    // Signal the waiting threads.
    for (std::size_t i = 0; i < last; i++) {
        waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
        RemoveThread(waiting_threads[i]);
        waiting_threads[i]->WaitForArbitration(false);
        waiting_threads[i]->ResumeFromWait();
    }
}

AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
AddressArbiter::~AddressArbiter() = default;

ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
                                           s32 num_to_wake) {
    switch (type) {
    case SignalType::Signal:
        return SignalToAddressOnly(address, num_to_wake);
    case SignalType::IncrementAndSignalIfEqual:
        return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
    case SignalType::ModifyByWaitingCountAndSignalIfEqual:
        return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    const std::vector<std::shared_ptr<Thread>> waiting_threads =
        GetThreadsWaitingOnAddress(address);
    WakeThreads(waiting_threads, num_to_wake);
    return RESULT_SUCCESS;
}

ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                              s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    auto& memory = system.Memory();

    // Ensure that we can write to the address.
    if (!memory.IsValidVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    const std::size_t current_core = system.CurrentCoreIndex();
    auto& monitor = system.Monitor();
    u32 current_value;
    do {
        current_value = monitor.ExclusiveRead32(current_core, address);

        if (current_value != static_cast<u32>(value)) {
            return ERR_INVALID_STATE;
        }
        current_value++;
    } while (!monitor.ExclusiveWrite32(current_core, address, current_value));

    return SignalToAddressOnly(address, num_to_wake);
}

ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                         s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    auto& memory = system.Memory();

    // Ensure that we can write to the address.
    if (!memory.IsValidVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Get threads waiting on the address.
    const std::vector<std::shared_ptr<Thread>> waiting_threads =
        GetThreadsWaitingOnAddress(address);

    const std::size_t current_core = system.CurrentCoreIndex();
    auto& monitor = system.Monitor();
    s32 updated_value;
    do {
        updated_value = monitor.ExclusiveRead32(current_core, address);

        if (updated_value != value) {
            return ERR_INVALID_STATE;
        }
        // Determine the modified value depending on the waiting count.
        if (num_to_wake <= 0) {
            if (waiting_threads.empty()) {
                updated_value = value + 1;
            } else {
                updated_value = value - 1;
            }
        } else {
            if (waiting_threads.empty()) {
                updated_value = value + 1;
            } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
                updated_value = value - 1;
            } else {
                updated_value = value;
            }
        }
    } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));

    WakeThreads(waiting_threads, num_to_wake);
    return RESULT_SUCCESS;
}

ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
                                          s64 timeout_ns) {
    switch (type) {
    case ArbitrationType::WaitIfLessThan:
        return WaitForAddressIfLessThan(address, value, timeout_ns, false);
    case ArbitrationType::DecrementAndWaitIfLessThan:
        return WaitForAddressIfLessThan(address, value, timeout_ns, true);
    case ArbitrationType::WaitIfEqual:
        return WaitForAddressIfEqual(address, value, timeout_ns);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                                    bool should_decrement) {
    auto& memory = system.Memory();
    auto& kernel = system.Kernel();
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();

    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);

        if (current_thread->IsPendingTermination()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
        }

        // Ensure that we can read the address.
        if (!memory.IsValidVirtualAddress(address)) {
            lock.CancelSleep();
            return ERR_INVALID_ADDRESS_STATE;
        }

        s32 current_value = static_cast<s32>(memory.Read32(address));
        if (current_value >= value) {
            lock.CancelSleep();
            return ERR_INVALID_STATE;
        }

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);

        s32 decrement_value;

        const std::size_t current_core = system.CurrentCoreIndex();
        auto& monitor = system.Monitor();
        do {
            current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
            if (should_decrement) {
                decrement_value = current_value - 1;
            } else {
                decrement_value = current_value;
            }
        } while (
            !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));

        // Short-circuit without rescheduling, if timeout is zero.
        if (timeout == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetArbiterWaitAddress(address);
        InsertThread(SharedFrom(current_thread));
        current_thread->SetStatus(ThreadStatus::WaitArb);
        current_thread->WaitForArbitration(true);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        if (current_thread->IsWaitingForArbitration()) {
            RemoveThread(SharedFrom(current_thread));
            current_thread->WaitForArbitration(false);
        }
    }

    return current_thread->GetSignalingResult();
}

ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
    auto& memory = system.Memory();
    auto& kernel = system.Kernel();
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();

    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);

        if (current_thread->IsPendingTermination()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
        }

        // Ensure that we can read the address.
        if (!memory.IsValidVirtualAddress(address)) {
            lock.CancelSleep();
            return ERR_INVALID_ADDRESS_STATE;
        }

        s32 current_value = static_cast<s32>(memory.Read32(address));
        if (current_value != value) {
            lock.CancelSleep();
            return ERR_INVALID_STATE;
        }

        // Short-circuit without rescheduling, if timeout is zero.
        if (timeout == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
        current_thread->SetArbiterWaitAddress(address);
        InsertThread(SharedFrom(current_thread));
        current_thread->SetStatus(ThreadStatus::WaitArb);
        current_thread->WaitForArbitration(true);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        if (current_thread->IsWaitingForArbitration()) {
            RemoveThread(SharedFrom(current_thread));
            current_thread->WaitForArbitration(false);
        }
    }

    return current_thread->GetSignalingResult();
}

void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
    const VAddr arb_addr = thread->GetArbiterWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];

    const auto iter =
        std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
            return entry->GetPriority() >= thread->GetPriority();
        });

    if (iter == thread_list.cend()) {
        thread_list.push_back(std::move(thread));
    } else {
        thread_list.insert(iter, std::move(thread));
    }
}

void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
    const VAddr arb_addr = thread->GetArbiterWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];

    const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
                                   [&thread](const auto& entry) { return thread == entry; });

    if (iter != thread_list.cend()) {
        thread_list.erase(iter);
    }
}

std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
    VAddr address) const {
    const auto iter = arb_threads.find(address);
    if (iter == arb_threads.cend()) {
        return {};
    }

    const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
    return {thread_list.cbegin(), thread_list.cend()};
}
} // namespace Kernel
||||
@@ -1,91 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>
#include <memory>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Thread;

class AddressArbiter {
public:
    enum class ArbitrationType {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };

    enum class SignalType {
        Signal = 0,
        IncrementAndSignalIfEqual = 1,
        ModifyByWaitingCountAndSignalIfEqual = 2,
    };

    explicit AddressArbiter(Core::System& system);
    ~AddressArbiter();

    AddressArbiter(const AddressArbiter&) = delete;
    AddressArbiter& operator=(const AddressArbiter&) = delete;

    AddressArbiter(AddressArbiter&&) = default;
    AddressArbiter& operator=(AddressArbiter&&) = delete;

    /// Signals an address being waited on with a particular signaling type.
    ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);

    /// Waits on an address with a particular arbitration type.
    ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);

private:
    /// Signals an address being waited on.
    ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);

    /// Signals an address being waited on and increments its value if equal to the value argument.
    ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);

    /// Signals an address being waited on and modifies its value based on waiting thread count if
    /// equal to the value argument.
    ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                             s32 num_to_wake);

    /// Waits on an address if the value passed is less than the argument value,
    /// optionally decrementing.
    ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                        bool should_decrement);

    /// Waits on an address if the value passed is equal to the argument value.
    ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);

    /// Wake up num_to_wake (or all) threads in a vector.
    void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);

    /// Insert a thread into the address arbiter container
    void InsertThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the address arbiter container
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Gets the threads waiting on an address.
    std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;

    /// List of threads waiting for an address arbiter
    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;

    Core::System& system;
};

} // namespace Kernel
@@ -33,9 +33,6 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
        server_port->AppendPendingSession(std::move(server));
    }

-    // Wake the threads waiting on the ServerPort
-    server_port->Signal();
-
    return MakeResult(std::move(client));
}
@@ -51,6 +51,8 @@ public:
     */
    void ConnectionClosed();

+    void Finalize() override {}
+
private:
    std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
    u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
@@ -5,14 +5,14 @@
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"

namespace Kernel {

-ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
+ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}

ClientSession::~ClientSession() {
    // This destructor will be called automatically when the last ClientSession handle is closed by
@@ -22,15 +22,6 @@ ClientSession::~ClientSession() {
    }
}

-bool ClientSession::ShouldWait(const Thread* thread) const {
-    UNIMPLEMENTED();
-    return {};
-}
-
-void ClientSession::Acquire(Thread* thread) {
-    UNIMPLEMENTED();
-}
-
bool ClientSession::IsSignaled() const {
    UNIMPLEMENTED();
    return true;
@@ -47,7 +38,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
    return MakeResult(std::move(client_session));
}

-ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread,
+ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
                                          Core::Memory::Memory& memory,
                                          Core::Timing::CoreTiming& core_timing) {
    // Keep ServerSession alive until we're done working with it.
@@ -7,7 +7,7 @@
#include <memory>
#include <string>

-#include "core/hle/kernel/synchronization_object.h"
+#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"

union ResultCode;
@@ -24,9 +24,9 @@ namespace Kernel {

class KernelCore;
class Session;
-class Thread;
+class KThread;

-class ClientSession final : public SynchronizationObject {
+class ClientSession final : public KSynchronizationObject {
public:
    explicit ClientSession(KernelCore& kernel);
    ~ClientSession() override;
@@ -46,15 +46,13 @@ public:
        return HANDLE_TYPE;
    }

-    ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
+    ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
                               Core::Timing::CoreTiming& core_timing);

-    bool ShouldWait(const Thread* thread) const override;
-
-    void Acquire(Thread* thread) override;
-
    bool IsSignaled() const override;

+    void Finalize() override {}
+
private:
    static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
                                                            std::shared_ptr<Session> parent,
@@ -13,12 +13,14 @@ namespace Kernel {
constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
+constexpr ResultCode ERR_TERMINATION_REQUESTED{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
+constexpr ResultCode ERR_INVALID_CURRENT_MEMORY{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
@@ -28,6 +30,7 @@ constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
+constexpr ResultCode ERR_CANCELLED{ErrorModule::Kernel, 118};
constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
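Editorial note: the additions above introduce aliases rather than new error numbers. ERR_TERMINATION_REQUESTED shares description 59 with ERR_THREAD_TERMINATING, and ERR_CANCELLED shares 118 with ERR_SYNCHRONIZATION_CANCELED, so old and new names denote the same raw code during the migration. A hedged sketch of what that equivalence means, assuming ResultCode exposes its raw value as core/hle/result.h defines it:

// Both names wrap {ErrorModule::Kernel, 59} and should compare equal by raw value.
static_assert(ERR_THREAD_TERMINATING.raw == ERR_TERMINATION_REQUESTED.raw);
static_assert(ERR_SYNCHRONIZATION_CANCELED.raw == ERR_CANCELLED.raw);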
@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)

GlobalSchedulerContext::~GlobalSchedulerContext() = default;

-void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
+void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.push_back(std::move(thread));
}

-void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
+void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                      thread_list.end());
@@ -12,7 +12,8 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
-#include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/svc_types.h"

namespace Kernel {
@@ -20,8 +21,12 @@ class KernelCore;
class SchedulerLock;

using KSchedulerPriorityQueue =
-    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
-constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+    KPriorityQueue<KThread, Core::Hardware::NUM_CPU_CORES, Svc::LowestThreadPriority,
+                   Svc::HighestThreadPriority>;
+
+static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+static_assert(Svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
+static_assert(Svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);

class GlobalSchedulerContext final {
    friend class KScheduler;
@@ -33,13 +38,13 @@ public:
    ~GlobalSchedulerContext();

    /// Adds a new thread to the scheduler
-    void AddThread(std::shared_ptr<Thread> thread);
+    void AddThread(std::shared_ptr<KThread> thread);

    /// Removes a thread from the scheduler
-    void RemoveThread(std::shared_ptr<Thread> thread);
+    void RemoveThread(std::shared_ptr<KThread> thread);

    /// Returns a list of all threads managed by the scheduler
-    [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
+    [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
        return thread_list;
    }
@@ -74,7 +79,7 @@ private:
    LockType scheduler_lock;

    /// Lists all thread ids that aren't deleted/etc.
-    std::vector<std::shared_ptr<Thread>> thread_list;
+    std::vector<std::shared_ptr<KThread>> thread_list;
    Common::SpinLock global_list_guard{};
};
@@ -9,9 +9,9 @@
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"

namespace Kernel {
namespace {
@@ -89,6 +89,10 @@ ResultCode HandleTable::Close(Handle handle) {

    const u16 slot = GetSlot(handle);

+    if (objects[slot].use_count() == 1) {
+        objects[slot]->Finalize();
+    }
+
    objects[slot] = nullptr;

    generations[slot] = next_free_slot;
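Editorial note: the new Close() logic finalizes an object only when the handle table holds the last shared_ptr reference to it. A small standalone sketch of the use_count() == 1 idea (the Finalizable type is hypothetical; note that use_count() is only reliable here because the scheduler lock serializes access in the kernel):

#include <cassert>
#include <memory>

struct Finalizable {
    bool finalized = false;
    void Finalize() { finalized = true; }
};

int main() {
    std::shared_ptr<Finalizable> slot = std::make_shared<Finalizable>();
    {
        std::shared_ptr<Finalizable> extra = slot; // another outstanding reference
        assert(slot.use_count() == 2);             // must not finalize yet
    }
    if (slot.use_count() == 1) { // the table owns the last reference
        slot->Finalize();
    }
    assert(slot->finalized);
}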
@@ -17,16 +17,16 @@
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/server_session.h"
-#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
-#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"

namespace Kernel {
@@ -48,7 +48,7 @@ void SessionRequestHandler::ClientDisconnected(

HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
                                     std::shared_ptr<ServerSession> server_session,
-                                     std::shared_ptr<Thread> thread)
+                                     std::shared_ptr<KThread> thread)
    : server_session(std::move(server_session)),
      thread(std::move(thread)), kernel{kernel}, memory{memory} {
    cmd_buf[0] = 0;
@@ -182,7 +182,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTabl
    return RESULT_SUCCESS;
}

-ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
+ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
    auto& owner_process = *thread.GetOwnerProcess();
    auto& handle_table = owner_process.GetHandleTable();
@@ -338,6 +338,28 @@ std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) cons
    return 0;
}

+bool HLERequestContext::CanReadBuffer(std::size_t buffer_index) const {
+    const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
+                           BufferDescriptorA()[buffer_index].Size()};
+
+    if (is_buffer_a) {
+        return BufferDescriptorA().size() > buffer_index;
+    } else {
+        return BufferDescriptorX().size() > buffer_index;
+    }
+}
+
+bool HLERequestContext::CanWriteBuffer(std::size_t buffer_index) const {
+    const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
+                           BufferDescriptorB()[buffer_index].Size()};
+
+    if (is_buffer_b) {
+        return BufferDescriptorB().size() > buffer_index;
+    } else {
+        return BufferDescriptorC().size() > buffer_index;
+    }
+}
+
std::string HLERequestContext::Description() const {
    if (!command_header) {
        return "No command header available";
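Editorial note: in the helpers added above, once is_buffer_a (or is_buffer_b) holds, the size comparison inside the if branch is already known to be true, so each helper reduces to "use descriptor A/B when it has a non-empty buffer at buffer_index, otherwise check X/C". A hedged, behavior-equivalent sketch of CanReadBuffer, assuming the descriptor accessors declared on the class:

// Not the committed code; a simplification to show the effective logic.
bool CanReadBufferEquivalent(const HLERequestContext& ctx, std::size_t buffer_index) {
    // If descriptor A has a non-empty buffer at this index, it is readable.
    if (ctx.BufferDescriptorA().size() > buffer_index &&
        ctx.BufferDescriptorA()[buffer_index].Size() != 0) {
        return true;
    }
    // Otherwise fall back to the X descriptors.
    return ctx.BufferDescriptorX().size() > buffer_index;
}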
@@ -40,9 +40,9 @@ class HLERequestContext;
class KernelCore;
class Process;
class ServerSession;
-class Thread;
-class ReadableEvent;
-class WritableEvent;
+class KThread;
+class KReadableEvent;
+class KWritableEvent;

enum class ThreadWakeupReason;
@@ -110,7 +110,7 @@ class HLERequestContext {
public:
    explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
                               std::shared_ptr<ServerSession> session,
-                               std::shared_ptr<Thread> thread);
+                               std::shared_ptr<KThread> thread);
    ~HLERequestContext();

    /// Returns a pointer to the IPC command buffer for this request.
@@ -126,15 +126,12 @@ public:
        return server_session;
    }

-    using WakeupCallback = std::function<void(
-        std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
-
    /// Populates this context with data from the requesting process/thread.
    ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
                                                 u32_le* src_cmdbuf);

    /// Writes data from this context back to the requesting process/thread.
-    ResultCode WriteToOutgoingCommandBuffer(Thread& thread);
+    ResultCode WriteToOutgoingCommandBuffer(KThread& thread);

    u32_le GetCommand() const {
        return command;
@@ -207,6 +204,12 @@ public:
    /// Helper function to get the size of the output buffer
    std::size_t GetWriteBufferSize(std::size_t buffer_index = 0) const;

+    /// Helper function to test whether the input buffer at buffer_index can be read
+    bool CanReadBuffer(std::size_t buffer_index = 0) const;
+
+    /// Helper function to test whether the output buffer at buffer_index can be written
+    bool CanWriteBuffer(std::size_t buffer_index = 0) const;
+
    template <typename T>
    std::shared_ptr<T> GetCopyObject(std::size_t index) {
        return DynamicObjectCast<T>(copy_objects.at(index));
@@ -261,11 +264,11 @@ public:

    std::string Description() const;

-    Thread& GetThread() {
+    KThread& GetThread() {
        return *thread;
    }

-    const Thread& GetThread() const {
+    const KThread& GetThread() const {
        return *thread;
    }
@@ -280,7 +283,7 @@ private:

    std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
    std::shared_ptr<Kernel::ServerSession> server_session;
-    std::shared_ptr<Thread> thread;
+    std::shared_ptr<KThread> thread;
    // TODO(yuriks): Check common usage of this and optimize size accordingly
    boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
    boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
365
src/core/hle/kernel/k_address_arbiter.cpp
Normal file
@@ -0,0 +1,365 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/memory.h"

namespace Kernel {

KAddressArbiter::KAddressArbiter(Core::System& system_)
    : system{system_}, kernel{system.Kernel()} {}
KAddressArbiter::~KAddressArbiter() = default;

namespace {

bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
    *out = system.Memory().Read32(address);
    return true;
}

bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
    auto& monitor = system.Monitor();
    const auto current_core = system.CurrentCoreIndex();

    // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
    // TODO(bunnei): We should call CanAccessAtomic(..) here.

    // Load the value from the address.
    const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));

    // Compare it to the desired one.
    if (current_value < value) {
        // If less than, we want to try to decrement.
        const s32 decrement_value = current_value - 1;

        // Decrement and try to store.
        if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
            // If we failed to store, try again.
            DecrementIfLessThan(system, out, address, value);
        }
    } else {
        // Otherwise, clear our exclusive hold and finish
        monitor.ClearExclusive();
    }

    // We're done.
    *out = current_value;
    return true;
}

bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
    auto& monitor = system.Monitor();
    const auto current_core = system.CurrentCoreIndex();

    // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
    // TODO(bunnei): We should call CanAccessAtomic(..) here.

    // Load the value from the address.
    const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));

    // Compare it to the desired one.
    if (current_value == value) {
        // If equal, we want to try to write the new value.

        // Try to store.
        if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
            // If we failed to store, try again.
            UpdateIfEqual(system, out, address, value, new_value);
        }
    } else {
        // Otherwise, clear our exclusive hold and finish.
        monitor.ClearExclusive();
    }

    // We're done.
    *out = current_value;
    return true;
}

} // namespace

ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {
        KScopedSchedulerLock sl(kernel);

        auto it = thread_tree.nfind_light({addr, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetAddressArbiterKey() == addr)) {
            KThread* target_thread = std::addressof(*it);
            target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

            ASSERT(target_thread->IsWaitingForAddressArbiter());
            target_thread->Wakeup();

            it = thread_tree.erase(it);
            target_thread->ClearAddressArbiter();
            ++num_waiters;
        }
    }
    return RESULT_SUCCESS;
}

ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {
        KScopedSchedulerLock sl(kernel);

        // Check the userspace value.
        s32 user_value{};
        R_UNLESS(UpdateIfEqual(system, std::addressof(user_value), addr, value, value + 1),
                 Svc::ResultInvalidCurrentMemory);

        if (user_value != value) {
            return Svc::ResultInvalidState;
        }

        auto it = thread_tree.nfind_light({addr, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetAddressArbiterKey() == addr)) {
            KThread* target_thread = std::addressof(*it);
            target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

            ASSERT(target_thread->IsWaitingForAddressArbiter());
            target_thread->Wakeup();

            it = thread_tree.erase(it);
            target_thread->ClearAddressArbiter();
            ++num_waiters;
        }
    }
    return RESULT_SUCCESS;
}

ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {
        KScopedSchedulerLock sl(kernel);

        auto it = thread_tree.nfind_light({addr, -1});
        // Determine the updated value.
        s32 new_value{};
        if (/*GetTargetFirmware() >= TargetFirmware_7_0_0*/ true) {
            if (count <= 0) {
                if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
                    new_value = value - 2;
                } else {
                    new_value = value + 1;
                }
            } else {
                if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
                    auto tmp_it = it;
                    s32 tmp_num_waiters{};
                    while ((++tmp_it != thread_tree.end()) &&
                           (tmp_it->GetAddressArbiterKey() == addr)) {
                        if ((tmp_num_waiters++) >= count) {
                            break;
                        }
                    }

                    if (tmp_num_waiters < count) {
                        new_value = value - 1;
                    } else {
                        new_value = value;
                    }
                } else {
                    new_value = value + 1;
                }
            }
        } else {
            if (count <= 0) {
                if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
                    new_value = value - 1;
                } else {
                    new_value = value + 1;
                }
            } else {
                auto tmp_it = it;
                s32 tmp_num_waiters{};
                while ((tmp_it != thread_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) &&
                       (tmp_num_waiters < count + 1)) {
                    ++tmp_num_waiters;
                    ++tmp_it;
                }

                if (tmp_num_waiters == 0) {
                    new_value = value + 1;
                } else if (tmp_num_waiters <= count) {
                    new_value = value - 1;
                } else {
                    new_value = value;
                }
            }
        }

        // Check the userspace value.
        s32 user_value{};
        bool succeeded{};
        if (value != new_value) {
            succeeded = UpdateIfEqual(system, std::addressof(user_value), addr, value, new_value);
        } else {
            succeeded = ReadFromUser(system, std::addressof(user_value), addr);
        }

        R_UNLESS(succeeded, Svc::ResultInvalidCurrentMemory);

        if (user_value != value) {
            return Svc::ResultInvalidState;
        }

        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetAddressArbiterKey() == addr)) {
            KThread* target_thread = std::addressof(*it);
            target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

            ASSERT(target_thread->IsWaitingForAddressArbiter());
            target_thread->Wakeup();

            it = thread_tree.erase(it);
            target_thread->ClearAddressArbiter();
            ++num_waiters;
        }
    }
    return RESULT_SUCCESS;
}

ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

    {
        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

        // Check that the thread isn't terminating.
        if (cur_thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Set the synced object.
        cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);

        // Read the value from userspace.
        s32 user_value{};
        bool succeeded{};
        if (decrement) {
            succeeded = DecrementIfLessThan(system, std::addressof(user_value), addr, value);
        } else {
            succeeded = ReadFromUser(system, std::addressof(user_value), addr);
        }

        if (!succeeded) {
            slp.CancelSleep();
            return Svc::ResultInvalidCurrentMemory;
        }

        // Check that the value is less than the specified one.
        if (user_value >= value) {
            slp.CancelSleep();
            return Svc::ResultInvalidState;
        }

        // Check that the timeout is non-zero.
        if (timeout == 0) {
            slp.CancelSleep();
            return Svc::ResultTimedOut;
        }

        // Set the arbiter.
        cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
        thread_tree.insert(*cur_thread);
        cur_thread->SetState(ThreadState::Waiting);
        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
    }

    // Cancel the timer wait.
    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

    // Remove from the address arbiter.
    {
        KScopedSchedulerLock sl(kernel);

        if (cur_thread->IsWaitingForAddressArbiter()) {
            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
            cur_thread->ClearAddressArbiter();
        }
    }

    // Get the result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}

ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

    {
        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

        // Check that the thread isn't terminating.
        if (cur_thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Set the synced object.
        cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);

        // Read the value from userspace.
        s32 user_value{};
        if (!ReadFromUser(system, std::addressof(user_value), addr)) {
            slp.CancelSleep();
            return Svc::ResultInvalidCurrentMemory;
        }

        // Check that the value is equal.
        if (value != user_value) {
            slp.CancelSleep();
            return Svc::ResultInvalidState;
        }

        // Check that the timeout is non-zero.
        if (timeout == 0) {
            slp.CancelSleep();
            return Svc::ResultTimedOut;
        }

        // Set the arbiter.
        cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
        thread_tree.insert(*cur_thread);
        cur_thread->SetState(ThreadState::Waiting);
        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
    }

    // Cancel the timer wait.
    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

    // Remove from the address arbiter.
    {
        KScopedSchedulerLock sl(kernel);

        if (cur_thread->IsWaitingForAddressArbiter()) {
            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
            cur_thread->ClearAddressArbiter();
        }
    }

    // Get the result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}

} // namespace Kernel
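Editorial note: DecrementIfLessThan and UpdateIfEqual above retry a failed exclusive store by recursing. The same compare-and-swap retry is often written as a loop; a minimal standalone sketch with std::atomic (illustration only: the kernel code must go through the exclusive monitor, not a plain atomic):

#include <atomic>

// Decrement *addr if it is currently less than `value`; returns the observed value,
// matching the *out semantics of the kernel helper.
int DecrementIfLessThanLoop(std::atomic<int>& addr, int value) {
    int current = addr.load(std::memory_order_relaxed);
    while (current < value) {
        // compare_exchange_weak reloads `current` on failure, so we just retry.
        if (addr.compare_exchange_weak(current, current - 1, std::memory_order_acq_rel)) {
            break;
        }
    }
    return current;
}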
70
src/core/hle/kernel/k_address_arbiter.h
Normal file
@@ -0,0 +1,70 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/svc_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class KernelCore;

class KAddressArbiter {
public:
    using ThreadTree = KConditionVariable::ThreadTree;

    explicit KAddressArbiter(Core::System& system_);
    ~KAddressArbiter();

    [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
                                             s32 count) {
        switch (type) {
        case Svc::SignalType::Signal:
            return Signal(addr, count);
        case Svc::SignalType::SignalAndIncrementIfEqual:
            return SignalAndIncrementIfEqual(addr, value, count);
        case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
            return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
        }
        UNREACHABLE();
        return RESULT_UNKNOWN;
    }

    [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
                                            s64 timeout) {
        switch (type) {
        case Svc::ArbitrationType::WaitIfLessThan:
            return WaitIfLessThan(addr, value, false, timeout);
        case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
            return WaitIfLessThan(addr, value, true, timeout);
        case Svc::ArbitrationType::WaitIfEqual:
            return WaitIfEqual(addr, value, timeout);
        }
        UNREACHABLE();
        return RESULT_UNKNOWN;
    }

private:
    [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
    [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
    [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);

    ThreadTree thread_tree;

    Core::System& system;
    KernelCore& kernel;
};

} // namespace Kernel
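Editorial note: the two inline dispatchers above are what the svcWaitForAddress/svcSignalToAddress handlers would call. A hedged sketch of such a call site; how the arbiter instance is obtained is outside this diff, so it is simply taken as a parameter:

// Illustration only: `arbiter` lives wherever the kernel keeps its arbiter state.
ResultCode WaitForAddressExample(Kernel::KAddressArbiter& arbiter, VAddr address,
                                 Kernel::Svc::ArbitrationType type, s32 value, s64 timeout_ns) {
    // Forwards to WaitIfLessThan / WaitIfEqual based on `type`.
    return arbiter.WaitForAddress(address, type, value, timeout_ns);
}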
@@ -27,7 +27,7 @@ public:
    }

    [[nodiscard]] constexpr bool GetAffinity(s32 core) const {
-        return this->mask & GetCoreBit(core);
+        return (this->mask & GetCoreBit(core)) != 0;
    }

    constexpr void SetAffinity(s32 core, bool set) {
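Editorial note: the one-line change above is behavior-preserving; it replaces an implicit u64-to-bool conversion with an explicit comparison, which typically silences implicit-conversion warnings (MSVC's C4800 family, for instance; treat the exact warning as an assumption). A standalone sketch of the equivalence:

#include <cstdint>

constexpr uint64_t GetCoreBitExample(int core) {
    return uint64_t{1} << core;
}

constexpr bool GetAffinityOld(uint64_t mask, int core) {
    return mask & GetCoreBitExample(core); // implicit u64 -> bool
}

constexpr bool GetAffinityNew(uint64_t mask, int core) {
    return (mask & GetCoreBitExample(core)) != 0; // explicit, same result
}

static_assert(GetAffinityOld(0b10, 1) == GetAffinityNew(0b10, 1));
static_assert(GetAffinityOld(0b10, 0) == GetAffinityNew(0b10, 0));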
345
src/core/hle/kernel/k_condition_variable.cpp
Normal file
@@ -0,0 +1,345 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <vector>

#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"

namespace Kernel {

namespace {

bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
    *out = system.Memory().Read32(address);
    return true;
}

bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
    system.Memory().Write32(address, *p);
    return true;
}

bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                      u32 new_orr_mask) {
    auto& monitor = system.Monitor();
    const auto current_core = system.CurrentCoreIndex();

    // Load the value from the address.
    const auto expected = monitor.ExclusiveRead32(current_core, address);

    // Orr in the new mask.
    u32 value = expected | new_orr_mask;

    // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
    if (!expected) {
        value = if_zero;
    }

    // Try to store.
    if (!monitor.ExclusiveWrite32(current_core, address, value)) {
        // If we failed to store, try again.
        return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
    }

    // We're done.
    *out = expected;
    return true;
}

} // namespace

KConditionVariable::KConditionVariable(Core::System& system_)
    : system{system_}, kernel{system.Kernel()} {}

KConditionVariable::~KConditionVariable() = default;

ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();

    // Signal the address.
    {
        KScopedSchedulerLock sl(kernel);

        // Remove waiter thread.
        s32 num_waiters{};
        KThread* next_owner_thread =
            owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);

        // Determine the next tag.
        u32 next_value{};
        if (next_owner_thread) {
            next_value = next_owner_thread->GetAddressKeyValue();
            if (num_waiters > 1) {
                next_value |= Svc::HandleWaitMask;
            }

            next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
            next_owner_thread->Wakeup();
        }

        // Write the value to userspace.
        if (!WriteToUser(system, addr, std::addressof(next_value))) {
            if (next_owner_thread) {
                next_owner_thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
            }

            return Svc::ResultInvalidCurrentMemory;
        }
    }

    return RESULT_SUCCESS;
}

ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

    // Wait for the address.
    {
        std::shared_ptr<KThread> owner_thread;
        ASSERT(!owner_thread);
        {
            KScopedSchedulerLock sl(kernel);
            cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

            // Check if the thread should terminate.
            R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested);

            {
                // Read the tag from userspace.
                u32 test_tag{};
                R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
                         Svc::ResultInvalidCurrentMemory);

                // If the tag isn't the handle (with wait mask), we're done.
                R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);

                // Get the lock owner thread.
                owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
                R_UNLESS(owner_thread, Svc::ResultInvalidHandle);

                // Update the lock.
                cur_thread->SetAddressKey(addr, value);
                owner_thread->AddWaiter(cur_thread);
                cur_thread->SetState(ThreadState::Waiting);
                cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
                cur_thread->SetMutexWaitAddressForDebugging(addr);
            }
        }
        ASSERT(owner_thread);
    }

    // Remove the thread as a waiter from the lock owner.
    {
        KScopedSchedulerLock sl(kernel);
        KThread* owner_thread = cur_thread->GetLockOwner();
        if (owner_thread != nullptr) {
            owner_thread->RemoveWaiter(cur_thread);
        }
    }

    // Get the wait result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}

KThread* KConditionVariable::SignalImpl(KThread* thread) {
    // Check pre-conditions.
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Update the tag.
    VAddr address = thread->GetAddressKey();
    u32 own_tag = thread->GetAddressKeyValue();

    u32 prev_tag{};
    bool can_access{};
    {
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        // TODO(bunnei): We should call CanAccessAtomic(..) here.
        can_access = true;
        if (can_access) {
            UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
                             Svc::HandleWaitMask);
        }
    }

    KThread* thread_to_close = nullptr;
    if (can_access) {
        if (prev_tag == InvalidHandle) {
            // If nobody held the lock previously, we're all good.
            thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
            thread->Wakeup();
        } else {
            // Get the previous owner.
            auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(
                prev_tag & ~Svc::HandleWaitMask);

            if (owner_thread) {
                // Add the thread as a waiter on the owner.
                owner_thread->AddWaiter(thread);
                thread_to_close = owner_thread.get();
            } else {
                // The lock was tagged with a thread that doesn't exist.
                thread->SetSyncedObject(nullptr, Svc::ResultInvalidState);
                thread->Wakeup();
            }
        }
    } else {
        // If the address wasn't accessible, note so.
        thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
        thread->Wakeup();
    }

    return thread_to_close;
}

void KConditionVariable::Signal(u64 cv_key, s32 count) {
    // Prepare for signaling.
    constexpr int MaxThreads = 16;

    // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
    // std::shared_ptr.
    std::vector<std::shared_ptr<KThread>> thread_list;
    std::array<KThread*, MaxThreads> thread_array;
    s32 num_to_close{};

    // Perform signaling.
    s32 num_waiters{};
    {
        KScopedSchedulerLock sl(kernel);

        auto it = thread_tree.nfind_light({cv_key, -1});
        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
               (it->GetConditionVariableKey() == cv_key)) {
            KThread* target_thread = std::addressof(*it);

            if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
                if (num_to_close < MaxThreads) {
                    thread_array[num_to_close++] = thread;
                } else {
                    thread_list.push_back(SharedFrom(thread));
                }
            }

            it = thread_tree.erase(it);
            target_thread->ClearConditionVariable();
            ++num_waiters;
        }

        // If we have no waiters, clear the has waiter flag.
        if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
            const u32 has_waiter_flag{};
            WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
        }
    }

    // Close threads in the array.
    for (auto i = 0; i < num_to_close; ++i) {
        thread_array[i]->Close();
    }

    // Close threads in the list.
    for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
        (*it)->Close();
    }
}

ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

    {
        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

        // Set the synced object.
        cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);

        // Check that the thread isn't terminating.
        if (cur_thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Update the value and process for the next owner.
        {
            // Remove waiter thread.
            s32 num_waiters{};
            KThread* next_owner_thread =
                cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);

            // Update for the next owner thread.
            u32 next_value{};
            if (next_owner_thread != nullptr) {
                // Get the next tag value.
                next_value = next_owner_thread->GetAddressKeyValue();
                if (num_waiters > 1) {
                    next_value |= Svc::HandleWaitMask;
                }

                // Wake up the next owner.
                next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
                next_owner_thread->Wakeup();
            }

            // Write to the cv key.
            {
                const u32 has_waiter_flag = 1;
                WriteToUser(system, key, std::addressof(has_waiter_flag));
                // TODO(bunnei): We should call DataMemoryBarrier(..) here.
            }

            // Write the value to userspace.
            if (!WriteToUser(system, addr, std::addressof(next_value))) {
                slp.CancelSleep();
                return Svc::ResultInvalidCurrentMemory;
            }
        }

        // Update condition variable tracking.
        {
            cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
            thread_tree.insert(*cur_thread);
        }

        // If the timeout is non-zero, set the thread as waiting.
        if (timeout != 0) {
            cur_thread->SetState(ThreadState::Waiting);
            cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
            cur_thread->SetMutexWaitAddressForDebugging(addr);
        }
    }

    // Cancel the timer wait.
    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

    // Remove from the condition variable.
    {
        KScopedSchedulerLock sl(kernel);

        if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(cur_thread);
        }

        if (cur_thread->IsWaitingForConditionVariable()) {
            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
            cur_thread->ClearConditionVariable();
        }
    }

    // Get the result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}

} // namespace Kernel
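Editorial note: the code above encodes a userspace mutex word as the owner's handle with Svc::HandleWaitMask or'd in whenever other threads are queued behind it. A small standalone sketch of that tag layout (the mask value shown is the conventional Horizon value, bit 30; treat it as an assumption here):

#include <cstdint>

using Handle = uint32_t;
constexpr uint32_t kHandleWaitMask = 0x40000000; // conventional value; assumption here

constexpr Handle OwnerOf(uint32_t tag) {
    return tag & ~kHandleWaitMask; // strip the waiter bit to recover the owner handle
}

constexpr bool HasWaiters(uint32_t tag) {
    return (tag & kHandleWaitMask) != 0;
}

static_assert(OwnerOf(0x40000123) == 0x123);
static_assert(HasWaiters(0x40000123));
static_assert(!HasWaiters(0x123));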
59
src/core/hle/kernel/k_condition_variable.h
Normal file
@@ -0,0 +1,59 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/assert.h"
#include "common/common_types.h"

#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"

namespace Core {
class System;
}

namespace Kernel {

class KConditionVariable {
public:
    using ThreadTree = typename KThread::ConditionVariableThreadTreeType;

    explicit KConditionVariable(Core::System& system_);
    ~KConditionVariable();

    // Arbitration
    [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
    [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);

    // Condition variable
    void Signal(u64 cv_key, s32 count);
    [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
    [[nodiscard]] KThread* SignalImpl(KThread* thread);

    ThreadTree thread_tree;

    Core::System& system;
    KernelCore& kernel;
};

inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                 KThread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->erase(tree->iterator_to(*thread));
}

inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                KThread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->insert(*thread);
}

} // namespace Kernel
32
src/core/hle/kernel/k_event.cpp
Normal file
@@ -0,0 +1,32 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_writable_event.h"

namespace Kernel {

KEvent::KEvent(KernelCore& kernel, std::string&& name) : Object{kernel, std::move(name)} {}

KEvent::~KEvent() = default;

std::shared_ptr<KEvent> KEvent::Create(KernelCore& kernel, std::string&& name) {
    return std::make_shared<KEvent>(kernel, std::move(name));
}

void KEvent::Initialize() {
    // Create our sub events.
    readable_event = std::make_shared<KReadableEvent>(kernel, GetName() + ":Readable");
    writable_event = std::make_shared<KWritableEvent>(kernel, GetName() + ":Writable");

    // Initialize our sub sessions.
    readable_event->Initialize(this);
    writable_event->Initialize(this);

    // Mark initialized.
    initialized = true;
}

} // namespace Kernel
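Editorial note: construction is deliberately two-step; Create allocates the KEvent and Initialize wires up its readable/writable halves. A hedged usage sketch mirroring how a service might create a named event (obtaining the KernelCore reference is outside this diff):

// `kernel` is a Kernel::KernelCore& obtained from the running system (assumed here).
std::shared_ptr<Kernel::KEvent> MakeNamedEvent(Kernel::KernelCore& kernel, std::string name) {
    auto event = Kernel::KEvent::Create(kernel, std::move(name));
    event->Initialize(); // wires up the KReadableEvent/KWritableEvent pair
    return event;
}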
57
src/core/hle/kernel/k_event.h
Normal file
@@ -0,0 +1,57 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/kernel/object.h"

namespace Kernel {

class KernelCore;
class KReadableEvent;
class KWritableEvent;

class KEvent final : public Object {
public:
    explicit KEvent(KernelCore& kernel, std::string&& name);
    ~KEvent() override;

    static std::shared_ptr<KEvent> Create(KernelCore& kernel, std::string&& name);

    void Initialize();

    void Finalize() override {}

    std::string GetTypeName() const override {
        return "KEvent";
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Event;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    std::shared_ptr<KReadableEvent>& GetReadableEvent() {
        return readable_event;
    }

    std::shared_ptr<KWritableEvent>& GetWritableEvent() {
        return writable_event;
    }

    const std::shared_ptr<KReadableEvent>& GetReadableEvent() const {
        return readable_event;
    }

    const std::shared_ptr<KWritableEvent>& GetWritableEvent() const {
        return writable_event;
    }

private:
    std::shared_ptr<KReadableEvent> readable_event;
    std::shared_ptr<KWritableEvent> writable_event;
    bool initialized{};
};

} // namespace Kernel
57
src/core/hle/kernel/k_light_condition_variable.h
Normal file
@@ -0,0 +1,57 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {
class KernelCore;

class KLightConditionVariable {
public:
    explicit KLightConditionVariable(KernelCore& kernel) : thread_queue(kernel), kernel(kernel) {}

    void Wait(KLightLock* lock, s64 timeout = -1) {
        WaitImpl(lock, timeout);
        lock->Lock();
    }

    void Broadcast() {
        KScopedSchedulerLock lk{kernel};
        while (thread_queue.WakeupFrontThread() != nullptr) {
            // We want to signal all threads, and so should continue waking up until there's nothing
            // to wake.
        }
    }

private:
    void WaitImpl(KLightLock* lock, s64 timeout) {
        KThread* owner = GetCurrentThreadPointer(kernel);

        // Sleep the thread.
        {
            KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
            lock->Unlock();

            if (!thread_queue.SleepThread(owner)) {
                lk.CancelSleep();
                return;
            }
        }

        // Cancel the task that the sleep setup.
        kernel.TimeManager().UnscheduleTimeEvent(owner);
    }
    KThreadQueue thread_queue;
    KernelCore& kernel;
};
} // namespace Kernel
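Editorial note: typical use pairs KLightConditionVariable with a KLightLock and re-checks the predicate after every wakeup, since Wait() drops the lock while sleeping and reacquires it before returning. A hedged usage sketch (the surrounding kernel objects and the KScopedLightLock alias from k_light_lock.h are assumed):

// Wait until `ready` becomes true. `lock` guards `ready`; the re-check after each
// wakeup is required because Broadcast() wakes all sleepers unconditionally.
void WaitUntilReady(Kernel::KLightLock& lock, Kernel::KLightConditionVariable& cv, bool& ready) {
    Kernel::KScopedLightLock lk{lock};
    while (!ready) {
        cv.Wait(std::addressof(lock));
    }
}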
130
src/core/hle/kernel/k_light_lock.cpp
Normal file
@@ -0,0 +1,130 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"

namespace Kernel {

void KLightLock::Lock() {
    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
    const uintptr_t cur_thread_tag = (cur_thread | 1);

    while (true) {
        uintptr_t old_tag = tag.load(std::memory_order_relaxed);

        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
                                          std::memory_order_acquire)) {
            if ((old_tag | 1) == cur_thread_tag) {
                return;
            }
        }

        if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
            break;
        }

        LockSlowPath(old_tag | 1, cur_thread);
    }
}

void KLightLock::Unlock() {
    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
    uintptr_t expected = cur_thread;
    do {
        if (expected != cur_thread) {
            return UnlockSlowPath(cur_thread);
        }
    } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
}

void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
    KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);

    // Pend the current thread waiting on the owner thread.
    {
        KScopedSchedulerLock sl{kernel};

        // Ensure we actually have locking to do.
        if (tag.load(std::memory_order_relaxed) != _owner) {
            return;
        }

        // Add the current thread as a waiter on the owner.
        KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
        cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
        owner_thread->AddWaiter(cur_thread);

        // Set thread states.
        if (cur_thread->GetState() == ThreadState::Runnable) {
            cur_thread->SetState(ThreadState::Waiting);
        } else {
            KScheduler::SetSchedulerUpdateNeeded(kernel);
        }

        if (owner_thread->IsSuspended()) {
            owner_thread->ContinueIfHasKernelWaiters();
        }
    }

    // We're no longer waiting on the lock owner.
    {
        KScopedSchedulerLock sl{kernel};
        KThread* owner_thread = cur_thread->GetLockOwner();
        if (owner_thread) {
            owner_thread->RemoveWaiter(cur_thread);
            KScheduler::SetSchedulerUpdateNeeded(kernel);
        }
    }
}

void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
    KThread* owner_thread = reinterpret_cast<KThread*>(_cur_thread);

    // Unlock.
    {
        KScopedSchedulerLock sl{kernel};

        // Get the next owner.
        s32 num_waiters = 0;
        KThread* next_owner = owner_thread->RemoveWaiterByKey(
            std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));

        // Pass the lock to the next owner.
        uintptr_t next_tag = 0;
        if (next_owner) {
            next_tag = reinterpret_cast<uintptr_t>(next_owner);
            if (num_waiters > 1) {
                next_tag |= 0x1;
            }

            if (next_owner->GetState() == ThreadState::Waiting) {
                next_owner->SetState(ThreadState::Runnable);
            } else {
                KScheduler::SetSchedulerUpdateNeeded(kernel);
            }

            if (next_owner->IsSuspended()) {
                next_owner->ContinueIfHasKernelWaiters();
            }
        }

        // We may have unsuspended in the process of acquiring the lock, so we'll re-suspend now if
        // so.
        if (owner_thread->IsSuspended()) {
            owner_thread->TrySuspend();
        }

        // Write the new tag value.
        tag.store(next_tag);
    }
}

bool KLightLock::IsLockedByCurrentThread() const {
    return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
}

} // namespace Kernel
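Editorial note: KLightLock packs the whole lock state into one word: the owner KThread pointer (at least 2-byte aligned, so bit 0 is always free) with bit 0 set when waiters are queued. A standalone sketch of that encoding (FakeThread is hypothetical, for illustration only):

#include <cassert>
#include <cstdint>

struct FakeThread {
    int id;
};

int main() {
    alignas(2) FakeThread t{1};
    const uintptr_t owner = reinterpret_cast<uintptr_t>(&t);

    uintptr_t tag = owner; // locked, no waiters
    assert((tag & 1) == 0); // alignment keeps bit 0 clear

    tag |= 1; // locked, with waiters pending
    FakeThread* decoded = reinterpret_cast<FakeThread*>(tag & ~uintptr_t{1});
    assert(decoded == &t); // stripping bit 0 recovers the owner
}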
41
src/core/hle/kernel/k_light_lock.h
Normal file
@@ -0,0 +1,41 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>

#include "common/common_types.h"
#include "core/hle/kernel/k_scoped_lock.h"

namespace Kernel {

class KernelCore;

class KLightLock {
public:
    explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}

    void Lock();

    void Unlock();

    void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);

    void UnlockSlowPath(uintptr_t cur_thread);

    bool IsLocked() const {
        return tag != 0;
    }

    bool IsLockedByCurrentThread() const;

private:
    std::atomic<uintptr_t> tag{};
    KernelCore& kernel;
};

using KScopedLightLock = KScopedLock<KLightLock>;

} // namespace Kernel
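Editorial note: the KScopedLightLock alias at the end gives RAII acquisition. A hedged sketch of guarding a critical section with it (how the lock instance is owned is an assumption here):

// `lock` is a KLightLock member of some kernel object (assumed).
void TouchSharedState(Kernel::KLightLock& lock, int& shared_counter) {
    Kernel::KScopedLightLock lk{lock}; // locks here, unlocks at scope exit
    ++shared_counter;
}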
@@ -8,27 +8,27 @@
#pragma once

#include <array>
#include <bit>
#include <concepts>

#include "common/assert.h"
#include "common/bit_set.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/concepts.h"

namespace Kernel {

-class Thread;
+class KThread;

template <typename T>
concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
    { t.GetAffinityMask() }
    ->Common::ConvertibleTo<u64>;
-    {t.SetAffinityMask(std::declval<u64>())};
+    {t.SetAffinityMask(0)};

-    { t.GetAffinity(std::declval<int32_t>()) }
+    { t.GetAffinity(0) }
    ->std::same_as<bool>;
-    {t.SetAffinity(std::declval<int32_t>(), std::declval<bool>())};
+    {t.SetAffinity(0, false)};
    {t.SetAll()};
};
@@ -42,11 +42,11 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
|
||||
->std::same_as<T*>;
|
||||
{ (typename T::QueueEntry()).GetPrev() }
|
||||
->std::same_as<T*>;
|
||||
{ t.GetPriorityQueueEntry(std::declval<s32>()) }
|
||||
{ t.GetPriorityQueueEntry(0) }
|
||||
->std::same_as<typename T::QueueEntry&>;
|
||||
|
||||
{t.GetAffinityMask()};
|
||||
{ typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() }
|
||||
{ std::remove_cvref_t<decltype(t.GetAffinityMask())>() }
|
||||
->KPriorityQueueAffinityMask;
|
||||
|
||||
{ t.GetActiveCore() }
|
||||
@@ -55,17 +55,17 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
|
||||
->Common::ConvertibleTo<s32>;
|
||||
};
|
||||
|
||||
template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
|
||||
template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
|
||||
requires KPriorityQueueMember<Member> class KPriorityQueue {
|
||||
public:
|
||||
using AffinityMaskType = typename std::remove_cv_t<
|
||||
typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>;
|
||||
using AffinityMaskType = std::remove_cv_t<
|
||||
std::remove_reference_t<decltype(std::declval<Member>().GetAffinityMask())>>;
|
||||
|
||||
static_assert(LowestPriority >= 0);
|
||||
static_assert(HighestPriority >= 0);
|
||||
static_assert(LowestPriority >= HighestPriority);
|
||||
static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
|
||||
static constexpr size_t NumCores = _NumCores;
|
||||
static constexpr size_t NumCores = NumCores_;
|
||||
|
||||
static constexpr bool IsValidCore(s32 core) {
|
||||
return 0 <= core && core < static_cast<s32>(NumCores);
|
||||
@@ -268,7 +268,7 @@ private:
|
||||
}
|
||||
|
||||
constexpr s32 GetNextCore(u64& affinity) {
|
||||
const s32 core = Common::CountTrailingZeroes64(affinity);
|
||||
const s32 core = std::countr_zero(affinity);
|
||||
ClearAffinityBit(affinity, core);
|
||||
return core;
|
||||
}
|
||||
@@ -367,7 +367,7 @@ public:
|
||||
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
|
||||
}
|
||||
|
||||
constexpr Thread* MoveToScheduledBack(Member* member) {
|
||||
constexpr KThread* MoveToScheduledBack(Member* member) {
|
||||
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
|
||||
member);
|
||||
}
|
||||
|
||||
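These hunks swap the std::declval-based requirement expressions for plain literal arguments and migrate Thread to KThread; the shape of the concepts is otherwise unchanged. For illustration, a sketch of how the template is expected to be instantiated once a member type satisfies the concept -- the core count and priority bounds below are assumptions for illustration, not values taken from this diff:

// Hypothetical instantiation sketch; 4 cores and priorities 0 (highest) to 63
// (lowest) are assumed values, and the static_assert is an illustrative check.
static_assert(KPriorityQueueMember<KThread>);
using ExampleSchedulerQueue = KPriorityQueue<KThread, 4, 63, 0>;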
57 src/core/hle/kernel/k_readable_event.cpp Normal file
@@ -0,0 +1,57 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

KReadableEvent::KReadableEvent(KernelCore& kernel, std::string&& name)
    : KSynchronizationObject{kernel, std::move(name)} {}
KReadableEvent::~KReadableEvent() = default;

bool KReadableEvent::IsSignaled() const {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    return is_signaled;
}

ResultCode KReadableEvent::Signal() {
    KScopedSchedulerLock lk{kernel};

    if (!is_signaled) {
        is_signaled = true;
        NotifyAvailable();
    }

    return RESULT_SUCCESS;
}

ResultCode KReadableEvent::Clear() {
    Reset();

    return RESULT_SUCCESS;
}

ResultCode KReadableEvent::Reset() {
    KScopedSchedulerLock lk{kernel};

    if (!is_signaled) {
        return Svc::ResultInvalidState;
    }

    is_signaled = false;
    return RESULT_SUCCESS;
}

} // namespace Kernel
51 src/core/hle/kernel/k_readable_event.h Normal file
@@ -0,0 +1,51 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Kernel {

class KernelCore;
class KEvent;

class KReadableEvent final : public KSynchronizationObject {
public:
    explicit KReadableEvent(KernelCore& kernel, std::string&& name);
    ~KReadableEvent() override;

    std::string GetTypeName() const override {
        return "KReadableEvent";
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    KEvent* GetParent() const {
        return parent;
    }

    void Initialize(KEvent* parent_) {
        is_signaled = false;
        parent = parent_;
    }

    bool IsSignaled() const override;
    void Finalize() override {}

    ResultCode Signal();
    ResultCode Clear();
    ResultCode Reset();

private:
    bool is_signaled{};
    KEvent* parent{};
};

} // namespace Kernel
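Note the asymmetry between Clear() and Reset() in the implementation above: both drop the signaled state under the scheduler lock, but only Reset() reports whether there was anything to reset. A hedged call-site sketch (the function name is hypothetical, not from this diff):

// Hypothetical call site illustrating the Clear()/Reset() contract.
void ExampleAcknowledge(KReadableEvent& event) {
    event.Clear(); // always RESULT_SUCCESS, even if the event was not signaled

    if (event.Reset() == Svc::ResultInvalidState) {
        // Reset() on an already non-signaled event fails instead of no-op'ing.
    }
}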
152 src/core/hle/kernel/k_resource_limit.cpp Normal file
@@ -0,0 +1,152 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#include "common/assert.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {
constexpr s64 DefaultTimeout = 10000000000; // 10 seconds

KResourceLimit::KResourceLimit(KernelCore& kernel, Core::System& system)
    : Object{kernel}, lock{kernel}, cond_var{kernel}, kernel{kernel}, system(system) {}
KResourceLimit::~KResourceLimit() = default;

s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
    const auto index = static_cast<std::size_t>(which);
    s64 value{};
    {
        KScopedLightLock lk{lock};
        value = limit_values[index];
        ASSERT(value >= 0);
        ASSERT(current_values[index] <= limit_values[index]);
        ASSERT(current_hints[index] <= current_values[index]);
    }
    return value;
}

s64 KResourceLimit::GetCurrentValue(LimitableResource which) const {
    const auto index = static_cast<std::size_t>(which);
    s64 value{};
    {
        KScopedLightLock lk{lock};
        value = current_values[index];
        ASSERT(value >= 0);
        ASSERT(current_values[index] <= limit_values[index]);
        ASSERT(current_hints[index] <= current_values[index]);
    }
    return value;
}

s64 KResourceLimit::GetPeakValue(LimitableResource which) const {
    const auto index = static_cast<std::size_t>(which);
    s64 value{};
    {
        KScopedLightLock lk{lock};
        value = peak_values[index];
        ASSERT(value >= 0);
        ASSERT(current_values[index] <= limit_values[index]);
        ASSERT(current_hints[index] <= current_values[index]);
    }
    return value;
}

s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
    const auto index = static_cast<std::size_t>(which);
    s64 value{};
    {
        KScopedLightLock lk(lock);
        ASSERT(current_values[index] >= 0);
        ASSERT(current_values[index] <= limit_values[index]);
        ASSERT(current_hints[index] <= current_values[index]);
        value = limit_values[index] - current_values[index];
    }

    return value;
}

ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
    const auto index = static_cast<std::size_t>(which);
    KScopedLightLock lk(lock);
    R_UNLESS(current_values[index] <= value, Svc::ResultInvalidState);

    limit_values[index] = value;

    return RESULT_SUCCESS;
}

bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
    return Reserve(which, value, system.CoreTiming().GetGlobalTimeNs().count() + DefaultTimeout);
}

bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
    ASSERT(value >= 0);
    const auto index = static_cast<std::size_t>(which);
    KScopedLightLock lk(lock);

    ASSERT(current_hints[index] <= current_values[index]);
    if (current_hints[index] >= limit_values[index]) {
        return false;
    }

    // Loop until we reserve or run out of time.
    while (true) {
        ASSERT(current_values[index] <= limit_values[index]);
        ASSERT(current_hints[index] <= current_values[index]);

        // If we would overflow, don't allow to succeed.
        if (current_values[index] + value <= current_values[index]) {
            break;
        }

        if (current_values[index] + value <= limit_values[index]) {
            current_values[index] += value;
            current_hints[index] += value;
            peak_values[index] = std::max(peak_values[index], current_values[index]);
            return true;
        }

        if (current_hints[index] + value <= limit_values[index] &&
            (timeout < 0 || system.CoreTiming().GetGlobalTimeNs().count() < timeout)) {
            waiter_count++;
            cond_var.Wait(&lock, timeout);
            waiter_count--;
        } else {
            break;
        }
    }

    return false;
}

void KResourceLimit::Release(LimitableResource which, s64 value) {
    Release(which, value, value);
}

void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
    ASSERT(value >= 0);
    ASSERT(hint >= 0);

    const auto index = static_cast<std::size_t>(which);
    KScopedLightLock lk(lock);
    ASSERT(current_values[index] <= limit_values[index]);
    ASSERT(current_hints[index] <= current_values[index]);
    ASSERT(value <= current_values[index]);
    ASSERT(hint <= current_hints[index]);

    current_values[index] -= value;
    current_hints[index] -= hint;

    if (waiter_count != 0) {
        cond_var.Broadcast();
    }
}

} // namespace Kernel
81 src/core/hle/kernel/k_resource_limit.h Normal file
@@ -0,0 +1,81 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include <array>
#include "common/common_types.h"
#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/object.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {
class KernelCore;
enum class LimitableResource : u32 {
    PhysicalMemory = 0,
    Threads = 1,
    Events = 2,
    TransferMemory = 3,
    Sessions = 4,

    Count,
};

constexpr bool IsValidResourceType(LimitableResource type) {
    return type < LimitableResource::Count;
}

class KResourceLimit final : public Object {
public:
    explicit KResourceLimit(KernelCore& kernel, Core::System& system);
    ~KResourceLimit();

    s64 GetLimitValue(LimitableResource which) const;
    s64 GetCurrentValue(LimitableResource which) const;
    s64 GetPeakValue(LimitableResource which) const;
    s64 GetFreeValue(LimitableResource which) const;

    ResultCode SetLimitValue(LimitableResource which, s64 value);

    bool Reserve(LimitableResource which, s64 value);
    bool Reserve(LimitableResource which, s64 value, s64 timeout);
    void Release(LimitableResource which, s64 value);
    void Release(LimitableResource which, s64 value, s64 hint);

    std::string GetTypeName() const override {
        return "KResourceLimit";
    }
    std::string GetName() const override {
        return GetTypeName();
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
    HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    virtual void Finalize() override {}

private:
    using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
    ResourceArray limit_values{};
    ResourceArray current_values{};
    ResourceArray current_hints{};
    ResourceArray peak_values{};
    mutable KLightLock lock;
    s32 waiter_count{};
    KLightConditionVariable cond_var;
    KernelCore& kernel;
    Core::System& system;
};
} // namespace Kernel
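Reserve() and Release() above are intended to be paired around the lifetime of a limited object: a Reserve() that cannot be satisfied immediately blocks on cond_var (for up to DefaultTimeout with the two-argument overload) waiting for a concurrent Release() to broadcast, and the overflow guard in the loop bails out rather than wrap the signed counters. A usage sketch with hypothetical names, not part of this diff:

// Hypothetical pattern: reserve before creating a limited object, release when
// it is destroyed. Reserve() returns false if the limit could not be satisfied
// before the timeout expired.
bool ExampleCreateEvent(KResourceLimit& limit) {
    if (!limit.Reserve(LimitableResource::Events, 1)) {
        return false; // over the limit; no Release() arrived in time
    }
    // ... construct the event; later, on teardown:
    limit.Release(LimitableResource::Events, 1);
    return true;
}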
@@ -5,6 +5,8 @@
 // This file references various implementation details from Atmosphere, an open-source firmware for
 // the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

+#include <bit>
+
 #include "common/assert.h"
 #include "common/bit_util.h"
 #include "common/fiber.h"
@@ -15,28 +17,33 @@
 #include "core/cpu_manager.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"

 namespace Kernel {

-static void IncrementScheduledCount(Kernel::Thread* thread) {
+static void IncrementScheduledCount(Kernel::KThread* thread) {
     if (auto process = thread->GetOwnerProcess(); process) {
         process->IncrementScheduledCount();
     }
 }

-void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
-                                 Core::EmuThreadHandle global_thread) {
-    u32 current_core = global_thread.host_handle;
-    bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
-                               (current_core < Core::Hardware::NUM_CPU_CORES);
+void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
+    auto scheduler = kernel.CurrentScheduler();
+
+    u32 current_core{0xF};
+    bool must_context_switch{};
+    if (scheduler) {
+        current_core = scheduler->core_id;
+        // TODO(bunnei): Should be set to true when we deprecate single core
+        must_context_switch = !kernel.IsPhantomModeForSingleCore();
+    }

     while (cores_pending_reschedule != 0) {
-        u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
+        const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
         ASSERT(core < Core::Hardware::NUM_CPU_CORES);
         if (!must_context_switch || core != current_core) {
             auto& phys_core = kernel.PhysicalCore(core);
@@ -54,28 +61,27 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
     }
 }

-u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
+u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
     std::scoped_lock lock{guard};
-    if (Thread* prev_highest_thread = this->state.highest_priority_thread;
+    if (KThread* prev_highest_thread = state.highest_priority_thread;
         prev_highest_thread != highest_thread) {
         if (prev_highest_thread != nullptr) {
             IncrementScheduledCount(prev_highest_thread);
             prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
         }
-        if (this->state.should_count_idle) {
+        if (state.should_count_idle) {
             if (highest_thread != nullptr) {
-                // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
-                //     process->SetRunningThread(this->core_id, highest_thread,
-                //     this->state.idle_count);
-                //}
+                if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
+                    process->SetRunningThread(core_id, highest_thread, state.idle_count);
+                }
             } else {
-                this->state.idle_count++;
+                state.idle_count++;
             }
         }

-        this->state.highest_priority_thread = highest_thread;
-        this->state.needs_scheduling = true;
-        return (1ULL << this->core_id);
+        state.highest_priority_thread = highest_thread;
+        state.needs_scheduling.store(true);
+        return (1ULL << core_id);
     } else {
         return 0;
     }
@@ -88,16 +94,29 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
     ClearSchedulerUpdateNeeded(kernel);

     u64 cores_needing_scheduling = 0, idle_cores = 0;
-    Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
+    KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
     auto& priority_queue = GetPriorityQueue(kernel);

     /// We want to go over all cores, finding the highest priority thread and determining if
     /// scheduling is needed for that core.
     for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-        Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
+        KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
         if (top_thread != nullptr) {
             // If the thread has no waiters, we need to check if the process has a thread pinned.
-            // TODO(bunnei): Implement thread pinning
+            if (top_thread->GetNumKernelWaiters() == 0) {
+                if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
+                    if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
+                        pinned != nullptr && pinned != top_thread) {
+                        // We prefer our parent's pinned thread if possible. However, we also don't
+                        // want to schedule un-runnable threads.
+                        if (pinned->GetRawState() == ThreadState::Runnable) {
+                            top_thread = pinned;
+                        } else {
+                            top_thread = nullptr;
+                        }
+                    }
+                }
+            }
         } else {
             idle_cores |= (1ULL << core_id);
         }
@@ -109,8 +128,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {

     // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
     while (idle_cores != 0) {
-        u32 core_id = Common::CountTrailingZeroes64(idle_cores);
-        if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
+        const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
+        if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
             s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
             size_t num_candidates = 0;

@@ -118,7 +137,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
             while (suggested != nullptr) {
                 // Check if the suggested thread is the top thread on its core.
                 const s32 suggested_core = suggested->GetActiveCore();
-                if (Thread* top_thread =
+                if (KThread* top_thread =
                         (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
                     top_thread != suggested) {
                     // Make sure we're not dealing with threads too high priority for migration.
@@ -150,7 +169,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
                 // Check if there's some other thread that can run on the candidate core.
                 const s32 candidate_core = migration_candidates[i];
                 suggested = top_threads[candidate_core];
-                if (Thread* next_on_candidate_core =
+                if (KThread* next_on_candidate_core =
                         priority_queue.GetScheduledNext(candidate_core, suggested);
                     next_on_candidate_core != nullptr) {
                     // The candidate core can run some other thread! We'll migrate its current
@@ -180,22 +199,35 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
     return cores_needing_scheduling;
 }

-void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
+void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
+        // Get an atomic reference to the core scheduler's previous thread.
+        std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
+        static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
+
+        // Atomically clear the previous thread if it's our target.
+        KThread* compare = thread;
+        prev_thread.compare_exchange_strong(compare, nullptr);
+    }
+}
+
+void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());

     // Check if the state has changed, because if it hasn't there's nothing to do.
-    const auto cur_state = thread->scheduling_state;
+    const auto cur_state = thread->GetRawState();
     if (cur_state == old_state) {
         return;
     }

     // Update the priority queues.
-    if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+    if (old_state == ThreadState::Runnable) {
         // If we were previously runnable, then we're not runnable now, and we should remove.
         GetPriorityQueue(kernel).Remove(thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
-    } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+    } else if (cur_state == ThreadState::Runnable) {
         // If we're now runnable, then we weren't previously, and we should add.
         GetPriorityQueue(kernel).PushBack(thread);
         IncrementScheduledCount(thread);
@@ -203,13 +235,11 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 ol
     }
 }

-void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
-                                         u32 old_priority) {
-
+void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());

     // If the thread is runnable, we want to change its priority in the queue.
-    if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+    if (thread->GetRawState() == ThreadState::Runnable) {
         GetPriorityQueue(kernel).ChangePriority(
             old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
         IncrementScheduledCount(thread);
@@ -217,12 +247,12 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thr
     }
 }

-void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
                                              const KAffinityMask& old_affinity, s32 old_core) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());

     // If the thread is runnable, we want to change its affinity in the queue.
-    if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+    if (thread->GetRawState() == ThreadState::Runnable) {
         GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
@@ -237,8 +267,8 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
     auto& priority_queue = GetPriorityQueue(kernel);

     // Rotate the front of the queue to the end.
-    Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
-    Thread* next_thread = nullptr;
+    KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
+    KThread* next_thread = nullptr;
     if (top_thread != nullptr) {
         next_thread = priority_queue.MoveToScheduledBack(top_thread);
         if (next_thread != top_thread) {
@@ -249,11 +279,11 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {

     // While we have a suggested thread, try to migrate it!
     {
-        Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
+        KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
         while (suggested != nullptr) {
             // Check if the suggested thread is the top thread on its core.
             const s32 suggested_core = suggested->GetActiveCore();
-            if (Thread* top_on_suggested_core =
+            if (KThread* top_on_suggested_core =
                     (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                           : nullptr;
                 top_on_suggested_core != suggested) {
@@ -285,15 +315,15 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
     // Now that we might have migrated a thread with the same priority, check if we can do better.

     {
-        Thread* best_thread = priority_queue.GetScheduledFront(core_id);
+        KThread* best_thread = priority_queue.GetScheduledFront(core_id);
         if (best_thread == GetCurrentThread()) {
             best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
         }

         // If the best thread we can choose has a priority the same or worse than ours, try to
         // migrate a higher priority thread.
-        if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
-            Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+        if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
+            KThread* suggested = priority_queue.GetSuggestedFront(core_id);
             while (suggested != nullptr) {
                 // If the suggestion's priority is the same as ours, don't bother.
                 if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -302,7 +332,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {

                 // Check if the suggested thread is the top thread on its core.
                 const s32 suggested_core = suggested->GetActiveCore();
-                if (Thread* top_on_suggested_core =
+                if (KThread* top_on_suggested_core =
                         (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                               : nullptr;
                     top_on_suggested_core != suggested) {
@@ -352,12 +382,14 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
     }
 }

-void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
-                                  Core::EmuThreadHandle global_thread) {
+void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
     if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        scheduler->GetCurrentThread()->EnableDispatch();
+        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
+        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
+            scheduler->GetCurrentThread()->EnableDispatch();
+        }
     }
-    RescheduleCores(kernel, cores_needing_scheduling, global_thread);
+    RescheduleCores(kernel, cores_needing_scheduling);
 }

 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -372,15 +404,13 @@ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
     return kernel.GlobalSchedulerContext().priority_queue;
 }

-void KScheduler::YieldWithoutCoreMigration() {
-    auto& kernel = system.Kernel();
-
+void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
     // Validate preconditions.
     ASSERT(CanSchedule(kernel));
     ASSERT(kernel.CurrentProcess() != nullptr);

     // Get the current thread and process.
-    Thread& cur_thread = *GetCurrentThread();
+    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
     Process& cur_process = *kernel.CurrentProcess();

     // If the thread's yield count matches, there's nothing for us to do.
@@ -395,10 +425,10 @@ void KScheduler::YieldWithoutCoreMigration() {
     {
         KScopedSchedulerLock lock(kernel);

-        const auto cur_state = cur_thread.scheduling_state;
-        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        const auto cur_state = cur_thread.GetRawState();
+        if (cur_state == ThreadState::Runnable) {
             // Put the current thread at the back of the queue.
-            Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+            KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
             IncrementScheduledCount(std::addressof(cur_thread));

             // If the next thread is different, we have an update to perform.
@@ -413,15 +443,13 @@ void KScheduler::YieldWithoutCoreMigration() {
     }
 }

-void KScheduler::YieldWithCoreMigration() {
-    auto& kernel = system.Kernel();
-
+void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
     // Validate preconditions.
     ASSERT(CanSchedule(kernel));
     ASSERT(kernel.CurrentProcess() != nullptr);

     // Get the current thread and process.
-    Thread& cur_thread = *GetCurrentThread();
+    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
     Process& cur_process = *kernel.CurrentProcess();

     // If the thread's yield count matches, there's nothing for us to do.
@@ -436,23 +464,23 @@ void KScheduler::YieldWithCoreMigration() {
     {
         KScopedSchedulerLock lock(kernel);

-        const auto cur_state = cur_thread.scheduling_state;
-        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        const auto cur_state = cur_thread.GetRawState();
+        if (cur_state == ThreadState::Runnable) {
             // Get the current active core.
             const s32 core_id = cur_thread.GetActiveCore();

             // Put the current thread at the back of the queue.
-            Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+            KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
             IncrementScheduledCount(std::addressof(cur_thread));

             // While we have a suggested thread, try to migrate it!
             bool recheck = false;
-            Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+            KThread* suggested = priority_queue.GetSuggestedFront(core_id);
             while (suggested != nullptr) {
                 // Check if the suggested thread is the thread running on its core.
                 const s32 suggested_core = suggested->GetActiveCore();

-                if (Thread* running_on_suggested_core =
+                if (KThread* running_on_suggested_core =
                         (suggested_core >= 0)
                             ? kernel.Scheduler(suggested_core).state.highest_priority_thread
                             : nullptr;
@@ -503,15 +531,13 @@ void KScheduler::YieldWithCoreMigration() {
     }
 }

-void KScheduler::YieldToAnyThread() {
-    auto& kernel = system.Kernel();
-
+void KScheduler::YieldToAnyThread(KernelCore& kernel) {
     // Validate preconditions.
     ASSERT(CanSchedule(kernel));
     ASSERT(kernel.CurrentProcess() != nullptr);

     // Get the current thread and process.
-    Thread& cur_thread = *GetCurrentThread();
+    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
     Process& cur_process = *kernel.CurrentProcess();

     // If the thread's yield count matches, there's nothing for us to do.
@@ -526,8 +552,8 @@ void KScheduler::YieldToAnyThread() {
     {
         KScopedSchedulerLock lock(kernel);

-        const auto cur_state = cur_thread.scheduling_state;
-        if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        const auto cur_state = cur_thread.GetRawState();
+        if (cur_state == ThreadState::Runnable) {
             // Get the current active core.
             const s32 core_id = cur_thread.GetActiveCore();

@@ -539,11 +565,11 @@ void KScheduler::YieldToAnyThread() {
             // If there's nothing scheduled, we can try to perform a migration.
             if (priority_queue.GetScheduledFront(core_id) == nullptr) {
                 // While we have a suggested thread, try to migrate it!
-                Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+                KThread* suggested = priority_queue.GetSuggestedFront(core_id);
                 while (suggested != nullptr) {
                     // Check if the suggested thread is the top thread on its core.
                     const s32 suggested_core = suggested->GetActiveCore();
-                    if (Thread* top_on_suggested_core =
+                    if (KThread* top_on_suggested_core =
                             (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                                   : nullptr;
                         top_on_suggested_core != suggested) {
@@ -581,22 +607,21 @@ void KScheduler::YieldToAnyThread() {
     }
 }

-KScheduler::KScheduler(Core::System& system, std::size_t core_id)
-    : system(system), core_id(core_id) {
+KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
     switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
-    this->state.needs_scheduling = true;
-    this->state.interrupt_task_thread_runnable = false;
-    this->state.should_count_idle = false;
-    this->state.idle_count = 0;
-    this->state.idle_thread_stack = nullptr;
-    this->state.highest_priority_thread = nullptr;
+    state.needs_scheduling.store(true);
+    state.interrupt_task_thread_runnable = false;
+    state.should_count_idle = false;
+    state.idle_count = 0;
+    state.idle_thread_stack = nullptr;
+    state.highest_priority_thread = nullptr;
 }

 KScheduler::~KScheduler() = default;

-Thread* KScheduler::GetCurrentThread() const {
-    if (current_thread) {
-        return current_thread;
+KThread* KScheduler::GetCurrentThread() const {
+    if (auto result = current_thread.load(); result) {
+        return result;
     }
     return idle_thread;
 }
@@ -613,7 +638,7 @@ void KScheduler::RescheduleCurrentCore() {
         phys_core.ClearInterrupt();
     }
     guard.lock();
-    if (this->state.needs_scheduling) {
+    if (state.needs_scheduling.load()) {
         Schedule();
     } else {
         guard.unlock();
@@ -624,67 +649,76 @@ void KScheduler::OnThreadStart() {
     SwitchContextStep2();
 }

-void KScheduler::Unload(Thread* thread) {
+void KScheduler::Unload(KThread* thread) {
+    LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+
     if (thread) {
-        thread->SetIsRunning(false);
-        if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
+        if (thread->IsCallingSvc()) {
             system.ArmInterface(core_id).ExceptionalExit();
-            thread->SetContinuousOnSVC(false);
+            thread->ClearIsCallingSvc();
         }
-        if (!thread->IsHLEThread() && !thread->HasExited()) {
+        if (!thread->IsTerminationRequested()) {
+            prev_thread = thread;
+
             Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
             cpu_core.SaveContext(thread->GetContext32());
             cpu_core.SaveContext(thread->GetContext64());
             // Save the TPIDR_EL0 system register in case it was modified.
             thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
             cpu_core.ClearExclusiveState();
+        } else {
+            prev_thread = nullptr;
         }
         thread->context_guard.unlock();
     }
 }

-void KScheduler::Reload(Thread* thread) {
-    if (thread) {
-        ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
-                   "Thread must be runnable.");
+void KScheduler::Reload(KThread* thread) {
+    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");

-        // Cancel any outstanding wakeup events for this thread
-        thread->SetIsRunning(true);
-        thread->SetWasRunning(false);
+    if (thread) {
+        ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");

         auto* const thread_owner_process = thread->GetOwnerProcess();
         if (thread_owner_process != nullptr) {
             system.Kernel().MakeCurrentProcess(thread_owner_process);
         }
-        if (!thread->IsHLEThread()) {
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.LoadContext(thread->GetContext32());
-            cpu_core.LoadContext(thread->GetContext64());
-            cpu_core.SetTlsAddress(thread->GetTLSAddress());
-            cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        }
+
+        Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+        cpu_core.LoadContext(thread->GetContext32());
+        cpu_core.LoadContext(thread->GetContext64());
+        cpu_core.SetTlsAddress(thread->GetTLSAddress());
+        cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+        cpu_core.ClearExclusiveState();
     }
 }

 void KScheduler::SwitchContextStep2() {
     // Load context of new thread
-    Reload(current_thread);
+    Reload(current_thread.load());

     RescheduleCurrentCore();
 }

 void KScheduler::ScheduleImpl() {
-    Thread* previous_thread = current_thread;
-    current_thread = state.highest_priority_thread;
+    KThread* previous_thread = current_thread.load();
+    KThread* next_thread = state.highest_priority_thread;

-    this->state.needs_scheduling = false;
+    state.needs_scheduling = false;

-    if (current_thread == previous_thread) {
+    // We never want to schedule a null thread, so use the idle thread if we don't have a next.
+    if (next_thread == nullptr) {
+        next_thread = idle_thread;
+    }
+
+    // If we're not actually switching thread, there's nothing to do.
+    if (next_thread == current_thread.load()) {
         guard.unlock();
         return;
     }

+    current_thread.store(next_thread);
+
     Process* const previous_process = system.Kernel().CurrentProcess();

     UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -715,28 +749,29 @@ void KScheduler::SwitchToCurrent() {
     while (true) {
         {
             std::scoped_lock lock{guard};
-            current_thread = state.highest_priority_thread;
-            this->state.needs_scheduling = false;
+            current_thread.store(state.highest_priority_thread);
+            state.needs_scheduling.store(false);
         }
         const auto is_switch_pending = [this] {
             std::scoped_lock lock{guard};
-            return state.needs_scheduling.load(std::memory_order_relaxed);
+            return state.needs_scheduling.load();
         };
         do {
-            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
-                current_thread->context_guard.lock();
-                if (!current_thread->IsRunnable()) {
-                    current_thread->context_guard.unlock();
+            auto next_thread = current_thread.load();
+            if (next_thread != nullptr) {
+                next_thread->context_guard.lock();
+                if (next_thread->GetRawState() != ThreadState::Runnable) {
+                    next_thread->context_guard.unlock();
                     break;
                 }
-                if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
-                    current_thread->context_guard.unlock();
+                if (next_thread->GetActiveCore() != core_id) {
+                    next_thread->context_guard.unlock();
                     break;
                 }
             }
             std::shared_ptr<Common::Fiber>* next_context;
-            if (current_thread != nullptr) {
-                next_context = &current_thread->GetHostContext();
+            if (next_thread != nullptr) {
+                next_context = &next_thread->GetHostContext();
             } else {
                 next_context = &idle_thread->GetHostContext();
             }
@@ -745,13 +780,13 @@ void KScheduler::SwitchToCurrent() {
     }
 }

-void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
+void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
     const u64 prev_switch_ticks = last_context_switch_time;
     const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
     const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;

     if (thread != nullptr) {
-        thread->UpdateCPUTimeTicks(update_ticks);
+        thread->AddCpuTime(core_id, update_ticks);
     }

     if (process != nullptr) {
@@ -765,15 +800,10 @@ void KScheduler::Initialize() {
     std::string name = "Idle Thread Id:" + std::to_string(core_id);
     std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
     void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
-                                     nullptr, std::move(init_func), init_func_parameter);
+    auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
+                                      KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
+                                      nullptr, std::move(init_func), init_func_parameter);
     idle_thread = thread_res.Unwrap().get();
-
-    {
-        KScopedSchedulerLock lock{system.Kernel()};
-        idle_thread->SetStatus(ThreadStatus::Ready);
-    }
 }

 KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
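A recurring theme in the k_scheduler.cpp hunks above is that the yield operations lost their implicit system.Kernel() lookup and became statics taking KernelCore explicitly. A hypothetical SVC-side caller after this change; the dispatch values are invented for illustration and not taken from this diff:

// Hypothetical caller of the new static yield entry points.
void ExampleSvcYield(KernelCore& kernel, s64 yield_type) {
    if (yield_type == 0) {
        KScheduler::YieldWithoutCoreMigration(kernel);
    } else if (yield_type == 1) {
        KScheduler::YieldWithCoreMigration(kernel);
    } else {
        KScheduler::YieldToAnyThread(kernel);
    }
}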
@@ -29,29 +29,33 @@ namespace Kernel {

 class KernelCore;
 class Process;
 class SchedulerLock;
-class Thread;
+class KThread;

 class KScheduler final {
 public:
-    explicit KScheduler(Core::System& system, std::size_t core_id);
+    explicit KScheduler(Core::System& system, s32 core_id);
     ~KScheduler();

     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();

     /// Reschedules cores pending reschedule, to be called on EnableScheduling.
-    static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
-                                Core::EmuThreadHandle global_thread);
+    static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);

     /// The next two are for SingleCore Only.
     /// Unload current thread before preempting core.
-    void Unload(Thread* thread);
+    void Unload(KThread* thread);

     /// Reload current thread after core preemption.
-    void Reload(Thread* thread);
+    void Reload(KThread* thread);

     /// Gets the current running thread
-    [[nodiscard]] Thread* GetCurrentThread() const;
+    [[nodiscard]] KThread* GetCurrentThread() const;

     /// Returns true if the scheduler is idle
     [[nodiscard]] bool IsIdle() const {
         return GetCurrentThread() == idle_thread;
     }

     /// Gets the timestamp for the last context switch in ticks.
     [[nodiscard]] u64 GetLastContextSwitchTicks() const;
@@ -72,14 +76,14 @@ public:
         return switch_fiber;
     }

-    [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
+    [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);

    /**
     * Takes a thread and moves it to the back of its priority list.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
-    void YieldWithoutCoreMigration();
+    static void YieldWithoutCoreMigration(KernelCore& kernel);

    /**
     * Takes a thread and moves it to the back of its priority list.
@@ -88,7 +92,7 @@ public:
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
-    void YieldWithCoreMigration();
+    static void YieldWithCoreMigration(KernelCore& kernel);

    /**
     * Takes a thread and moves it out of the scheduling queue.
@@ -97,17 +101,18 @@ public:
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
-    void YieldToAnyThread();
+    static void YieldToAnyThread(KernelCore& kernel);
+
+    static void ClearPreviousThread(KernelCore& kernel, KThread* thread);

     /// Notify the scheduler a thread's status has changed.
-    static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
+    static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);

     /// Notify the scheduler a thread's priority has changed.
-    static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
-                                        u32 old_priority);
+    static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);

     /// Notify the scheduler a thread's core and/or affinity mask has changed.
-    static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+    static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
                                             const KAffinityMask& old_affinity, s32 old_core);

     static bool CanSchedule(KernelCore& kernel);
@@ -115,8 +120,7 @@ public:
     static void SetSchedulerUpdateNeeded(KernelCore& kernel);
     static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
     static void DisableScheduling(KernelCore& kernel);
-    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
-                                 Core::EmuThreadHandle global_thread);
+    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
    [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);

 private:
@@ -164,13 +168,15 @@ private:
     * most recent tick count retrieved. No special arithmetic is
     * applied to it.
     */
-    void UpdateLastContextSwitchTime(Thread* thread, Process* process);
+    void UpdateLastContextSwitchTime(KThread* thread, Process* process);

     static void OnSwitch(void* this_scheduler);
     void SwitchToCurrent();

-    Thread* current_thread{};
-    Thread* idle_thread{};
+    KThread* prev_thread{};
+    std::atomic<KThread*> current_thread{};
+
+    KThread* idle_thread;

     std::shared_ptr<Common::Fiber> switch_fiber{};

@@ -179,7 +185,7 @@ private:
     bool interrupt_task_thread_runnable{};
     bool should_count_idle{};
     u64 idle_count{};
-    Thread* highest_priority_thread{};
+    KThread* highest_priority_thread{};
     void* idle_thread_stack{};
 };

@@ -187,7 +193,7 @@ private:

     Core::System& system;
     u64 last_context_switch_time{};
-    const std::size_t core_id;
+    const s32 core_id;

     Common::SpinLock guard{};
 };
@@ -10,6 +10,7 @@
 #include "common/assert.h"
 #include "common/spin_lock.h"
 #include "core/hardware_properties.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"

 namespace Kernel {

@@ -19,49 +20,48 @@ class KernelCore;
 template <typename SchedulerType>
 class KAbstractSchedulerLock {
 public:
-    explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}
+    explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}

     bool IsLockedByCurrentThread() const {
-        return this->owner_thread == kernel.GetCurrentEmuThreadID();
+        return owner_thread == GetCurrentThreadPointer(kernel);
     }

     void Lock() {
-        if (this->IsLockedByCurrentThread()) {
+        if (IsLockedByCurrentThread()) {
             // If we already own the lock, we can just increment the count.
-            ASSERT(this->lock_count > 0);
-            this->lock_count++;
+            ASSERT(lock_count > 0);
+            lock_count++;
         } else {
             // Otherwise, we want to disable scheduling and acquire the spinlock.
             SchedulerType::DisableScheduling(kernel);
-            this->spin_lock.lock();
+            spin_lock.lock();

             // For debug, ensure that our state is valid.
-            ASSERT(this->lock_count == 0);
-            ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
+            ASSERT(lock_count == 0);
+            ASSERT(owner_thread == nullptr);

             // Increment count, take ownership.
-            this->lock_count = 1;
-            this->owner_thread = kernel.GetCurrentEmuThreadID();
+            lock_count = 1;
+            owner_thread = GetCurrentThreadPointer(kernel);
         }
     }

     void Unlock() {
-        ASSERT(this->IsLockedByCurrentThread());
-        ASSERT(this->lock_count > 0);
+        ASSERT(IsLockedByCurrentThread());
+        ASSERT(lock_count > 0);

         // Release an instance of the lock.
-        if ((--this->lock_count) == 0) {
+        if ((--lock_count) == 0) {
             // We're no longer going to hold the lock. Take note of what cores need scheduling.
             const u64 cores_needing_scheduling =
                 SchedulerType::UpdateHighestPriorityThreads(kernel);
-            Core::EmuThreadHandle leaving_thread = owner_thread;

             // Note that we no longer hold the lock, and unlock the spinlock.
-            this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
-            this->spin_lock.unlock();
+            owner_thread = nullptr;
+            spin_lock.unlock();

             // Enable scheduling, and perform a rescheduling operation.
-            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
+            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
         }
     }

@@ -69,7 +69,7 @@ private:
     KernelCore& kernel;
     Common::SpinLock spin_lock{};
     s32 lock_count{};
-    Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()};
+    KThread* owner_thread{};
 };

 } // namespace Kernel
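KAbstractSchedulerLock is recursive per owning thread: re-locking on the owner only bumps lock_count, and scheduling is only re-enabled when the outermost Unlock() drops the count back to zero. A sketch of that behavior through the KScopedSchedulerLock RAII wrapper seen elsewhere in this diff (the function is hypothetical):

// Illustration only: nested scoped locks on the same thread.
void ExampleNested(KernelCore& kernel) {
    KScopedSchedulerLock outer{kernel}; // lock_count 0 -> 1, disables scheduling
    {
        KScopedSchedulerLock inner{kernel}; // same owner: lock_count 1 -> 2
    } // lock_count 2 -> 1, scheduler stays locked
} // lock_count 1 -> 0, re-enables scheduling and reschedules pending cores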
@@ -9,27 +9,24 @@

 #include "common/common_types.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"

 namespace Kernel {

 class KScopedSchedulerLockAndSleep {
 public:
-    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
-                                          s64 timeout)
-        : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
-        event_handle = InvalidHandle;
-
+    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
+        : kernel(kernel), thread(t), timeout_tick(timeout) {
         // Lock the scheduler.
         kernel.GlobalSchedulerContext().scheduler_lock.Lock();
     }

     ~KScopedSchedulerLockAndSleep() {
         // Register the sleep.
-        if (this->timeout_tick > 0) {
-            kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
+        if (timeout_tick > 0) {
+            kernel.TimeManager().ScheduleTimeEvent(thread, timeout_tick);
         }

         // Unlock the scheduler.
@@ -37,13 +34,12 @@ public:
     }

     void CancelSleep() {
-        this->timeout_tick = 0;
+        timeout_tick = 0;
     }

 private:
     KernelCore& kernel;
-    Handle& event_handle;
-    Thread* thread{};
+    KThread* thread{};
     s64 timeout_tick{};
 };
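The destructor above does the real work: it arms the time event only while timeout_tick is still positive, so CancelSleep() is how a caller converts a timed sleep into no sleep at all. The same pattern appears in KSynchronizationObject::Wait() below; a condensed sketch with a hypothetical function and flag:

// Hypothetical sketch of the lock-and-sleep pattern.
void ExampleTimedWait(KernelCore& kernel, KThread* cur_thread, s64 timeout, bool already_done) {
    KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
    if (already_done) {
        // Nothing to wait for: suppress the timer the destructor would arm.
        slp.CancelSleep();
        return;
    }
    // ... queue the thread as a waiter; on scope exit the destructor schedules
    // the time event (if timeout > 0) and unlocks the scheduler.
}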
171
src/core/hle/kernel/k_synchronization_object.cpp
Normal file
171
src/core/hle/kernel/k_synchronization_object.cpp
Normal file
@@ -0,0 +1,171 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
                                        KSynchronizationObject** objects, const s32 num_objects,
                                        s64 timeout) {
    // Allocate space on stack for thread nodes.
    std::vector<ThreadListNode> thread_nodes(num_objects);

    // Prepare for wait.
    KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();

    {
        // Setup the scheduling lock and sleep.
        KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};

        // Check if any of the objects are already signaled.
        for (auto i = 0; i < num_objects; ++i) {
            ASSERT(objects[i] != nullptr);

            if (objects[i]->IsSignaled()) {
                *out_index = i;
                slp.CancelSleep();
                return RESULT_SUCCESS;
            }
        }

        // Check if the timeout is zero.
        if (timeout == 0) {
            slp.CancelSleep();
            return Svc::ResultTimedOut;
        }

        // Check if the thread should terminate.
        if (thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Check if waiting was canceled.
        if (thread->IsWaitCancelled()) {
            slp.CancelSleep();
            thread->ClearWaitCancelled();
            return Svc::ResultCancelled;
        }

        // Add the waiters.
        for (auto i = 0; i < num_objects; ++i) {
            thread_nodes[i].thread = thread;
            thread_nodes[i].next = nullptr;

            if (objects[i]->thread_list_tail == nullptr) {
                objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
            } else {
                objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
            }

            objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
        }

        // For debugging only
        thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});

        // Mark the thread as waiting.
        thread->SetCancellable();
        thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
        thread->SetState(ThreadState::Waiting);
        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
    }

    // The lock/sleep is done, so we should be able to get our result.

    // Thread is no longer cancellable.
    thread->ClearCancellable();

    // For debugging only
    thread->SetWaitObjectsForDebugging({});

    // Cancel the timer as needed.
    kernel.TimeManager().UnscheduleTimeEvent(thread);

    // Get the wait result.
    ResultCode wait_result{RESULT_SUCCESS};
    s32 sync_index = -1;
    {
        KScopedSchedulerLock lock(kernel);
        KSynchronizationObject* synced_obj;
        wait_result = thread->GetWaitResult(std::addressof(synced_obj));

        for (auto i = 0; i < num_objects; ++i) {
            // Unlink the object from the list.
            ThreadListNode* prev_ptr =
                reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
            ThreadListNode* prev_val = nullptr;
            ThreadListNode *prev, *tail_prev;

            do {
                prev = prev_ptr;
                prev_ptr = prev_ptr->next;
                tail_prev = prev_val;
                prev_val = prev_ptr;
            } while (prev_ptr != std::addressof(thread_nodes[i]));

            if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
                objects[i]->thread_list_tail = tail_prev;
            }

            prev->next = thread_nodes[i].next;

            if (objects[i] == synced_obj) {
                sync_index = i;
            }
        }
    }

    // Set output.
    *out_index = sync_index;
    return wait_result;
}

KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}

KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name)
    : Object{kernel, std::move(name)} {}

KSynchronizationObject::~KSynchronizationObject() = default;

void KSynchronizationObject::NotifyAvailable(ResultCode result) {
    KScopedSchedulerLock lock(kernel);

    // If we're not signaled, we've nothing to notify.
    if (!this->IsSignaled()) {
        return;
    }

    // Iterate over each thread.
    for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
        KThread* thread = cur_node->thread;
        if (thread->GetState() == ThreadState::Waiting) {
            thread->SetSyncedObject(this, result);
            thread->SetState(ThreadState::Runnable);
        }
    }
}

std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
    std::vector<KThread*> threads;

    // If debugging, dump the list of waiters.
    {
        KScopedSchedulerLock lock(kernel);
        for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
            threads.emplace_back(cur_node->thread);
        }
    }

    return threads;
}
} // namespace Kernel
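A derived waitable only has to implement IsSignaled() and call NotifyAvailable() once its state flips; the base class owns the waiter list and the wake-up walk above. A hedged sketch of a minimal event-like subclass (KSimpleEvent is hypothetical, not part of this change, and omits the Object boilerplate such as GetTypeName/GetHandleType):

    class KSimpleEvent final : public KSynchronizationObject {
    public:
        explicit KSimpleEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}

        bool IsSignaled() const override {
            return signaled;
        }

        void Signal() {
            // Flip the state first so the IsSignaled() check inside
            // NotifyAvailable() passes, then wake every waiting thread.
            signaled = true;
            this->NotifyAvailable();
        }

    private:
        bool signaled{};
    };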
src/core/hle/kernel/k_synchronization_object.h (59 lines, new file)
@@ -0,0 +1,59 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <vector>

#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Kernel {

class KernelCore;
class Synchronization;
class KThread;

/// Class that represents a Kernel object that a thread can be waiting on
class KSynchronizationObject : public Object {
public:
    struct ThreadListNode {
        ThreadListNode* next{};
        KThread* thread{};
    };

    [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
                                         KSynchronizationObject** objects, const s32 num_objects,
                                         s64 timeout);

    [[nodiscard]] virtual bool IsSignaled() const = 0;

    [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;

protected:
    explicit KSynchronizationObject(KernelCore& kernel);
    explicit KSynchronizationObject(KernelCore& kernel, std::string&& name);
    virtual ~KSynchronizationObject();

    void NotifyAvailable(ResultCode result);
    void NotifyAvailable() {
        return this->NotifyAvailable(RESULT_SUCCESS);
    }

private:
    ThreadListNode* thread_list_head{};
    ThreadListNode* thread_list_tail{};
};

// Specialization of DynamicObjectCast for KSynchronizationObjects
template <>
inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
    std::shared_ptr<Object> object) {
    if (object != nullptr && object->IsWaitable()) {
        return std::static_pointer_cast<KSynchronizationObject>(object);
    }
    return nullptr;
}

} // namespace Kernel
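The DynamicObjectCast specialization above narrows via Object::IsWaitable() instead of RTTI, so a generically-fetched kernel object can be tested for waitability in one step. A small sketch of the intended call pattern (the handle_table lookup is an assumption about the surrounding code, not part of this diff):

    // Fetch a generic kernel object and narrow it to a waitable, if it is one.
    std::shared_ptr<Object> obj = handle_table.GetGeneric(handle);
    if (auto sync = DynamicObjectCast<KSynchronizationObject>(obj)) {
        // The cast only succeeds when obj->IsWaitable() is true.
        const bool ready = sync->IsSignaled();
    }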
src/core/hle/kernel/k_thread.cpp (1050 lines, new file): diff suppressed because it is too large.
src/core/hle/kernel/k_thread.h (768 lines, new file)
@@ -0,0 +1,768 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <span>
#include <string>
#include <utility>
#include <vector>

#include <boost/intrusive/list.hpp>

#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"

namespace Common {
class Fiber;
}

namespace Core {
class ARM_Interface;
class System;
} // namespace Core

namespace Kernel {

class GlobalSchedulerContext;
class KernelCore;
class Process;
class KScheduler;
class KThreadQueue;

using KThreadFunction = VAddr;

enum class ThreadType : u32 {
    Main = 0,
    Kernel = 1,
    HighPriority = 2,
    User = 3,
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadType);

enum class SuspendType : u32 {
    Process = 0,
    Thread = 1,
    Debug = 2,
    Backtrace = 3,
    Init = 4,

    Count,
};

enum class ThreadState : u16 {
    Initialized = 0,
    Waiting = 1,
    Runnable = 2,
    Terminated = 3,

    SuspendShift = 4,
    Mask = (1 << SuspendShift) - 1,

    ProcessSuspended = (1 << (0 + SuspendShift)),
    ThreadSuspended = (1 << (1 + SuspendShift)),
    DebugSuspended = (1 << (2 + SuspendShift)),
    BacktraceSuspended = (1 << (3 + SuspendShift)),
    InitSuspended = (1 << (4 + SuspendShift)),

    SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadState);

enum class DpcFlag : u32 {
    Terminating = (1 << 0),
    Terminated = (1 << 1),
};

enum class ThreadWaitReasonForDebugging : u32 {
    None,            ///< Thread is not waiting
    Sleep,           ///< Thread is waiting due to a SleepThread SVC
    IPC,             ///< Thread is waiting for the reply from an IPC request
    Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
    ConditionVar,    ///< Thread is waiting due to a WaitProcessWideKey SVC
    Arbitration,     ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
    Suspended,       ///< Thread is waiting due to process suspension
};

[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);

class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
    friend class KScheduler;
    friend class Process;

public:
    static constexpr s32 DefaultThreadPriority = 44;
    static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;

    explicit KThread(KernelCore& kernel);
    ~KThread() override;

public:
    using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
    using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
    using WaiterList = boost::intrusive::list<KThread>;

    /**
     * Creates and returns a new thread. The new thread is immediately scheduled
     * @param system The instance of the whole system
     * @param name The friendly name desired for the thread
     * @param entry_point The address at which the thread should start execution
     * @param priority The thread's priority
     * @param arg User data to pass to the thread
     * @param processor_id The ID(s) of the processors on which the thread is desired to be run
     * @param stack_top The address of the thread's stack top
     * @param owner_process The parent process for the thread, if null, it's a kernel thread
     * @return A shared pointer to the newly created thread
     */
    [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
        Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
        u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);

    /**
     * Creates and returns a new thread. The new thread is immediately scheduled
     * @param system The instance of the whole system
     * @param name The friendly name desired for the thread
     * @param entry_point The address at which the thread should start execution
     * @param priority The thread's priority
     * @param arg User data to pass to the thread
     * @param processor_id The ID(s) of the processors on which the thread is desired to be run
     * @param stack_top The address of the thread's stack top
     * @param owner_process The parent process for the thread, if null, it's a kernel thread
     * @param thread_start_func The function where the host context will start.
     * @param thread_start_parameter The parameter which will passed to host context on init
     * @return A shared pointer to the newly created thread
     */
    [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
        Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
        u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
        std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);

    [[nodiscard]] std::string GetName() const override {
        return name;
    }

    void SetName(std::string new_name) {
        name = std::move(new_name);
    }

    [[nodiscard]] std::string GetTypeName() const override {
        return "Thread";
    }

    static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
    [[nodiscard]] HandleType GetHandleType() const override {
        return HANDLE_TYPE;
    }

    /**
     * Gets the thread's current priority
     * @return The current thread's priority
     */
    [[nodiscard]] s32 GetPriority() const {
        return priority;
    }

    /**
     * Sets the thread's current priority.
     * @param priority The new priority.
     */
    void SetPriority(s32 value) {
        priority = value;
    }

    /**
     * Gets the thread's nominal priority.
     * @return The current thread's nominal priority.
     */
    [[nodiscard]] s32 GetBasePriority() const {
        return base_priority;
    }

    /**
     * Gets the thread's thread ID
     * @return The thread's ID
     */
    [[nodiscard]] u64 GetThreadID() const {
        return thread_id;
    }

    void ContinueIfHasKernelWaiters() {
        if (GetNumKernelWaiters() > 0) {
            Continue();
        }
    }

    void Wakeup();

    void SetBasePriority(s32 value);

    [[nodiscard]] ResultCode Run();

    void Exit();

    [[nodiscard]] u32 GetSuspendFlags() const {
        return suspend_allowed_flags & suspend_request_flags;
    }

    [[nodiscard]] bool IsSuspended() const {
        return GetSuspendFlags() != 0;
    }

    [[nodiscard]] bool IsSuspendRequested(SuspendType type) const {
        return (suspend_request_flags &
                (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
               0;
    }

    [[nodiscard]] bool IsSuspendRequested() const {
        return suspend_request_flags != 0;
    }

    void RequestSuspend(SuspendType type);

    void Resume(SuspendType type);

    void TrySuspend();

    void Continue();

    void Suspend();

    void Finalize() override;

    bool IsSignaled() const override;

    void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
        synced_object = obj;
        wait_result = wait_res;
    }

    [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
        *out = synced_object;
        return wait_result;
    }

    /*
     * Returns the Thread Local Storage address of the current thread
     * @returns VAddr of the thread's TLS
     */
    [[nodiscard]] VAddr GetTLSAddress() const {
        return tls_address;
    }

    /*
     * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
     * @returns The value of the TPIDR_EL0 register.
     */
    [[nodiscard]] u64 GetTPIDR_EL0() const {
        return thread_context_64.tpidr;
    }

    /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
    void SetTPIDR_EL0(u64 value) {
        thread_context_64.tpidr = value;
        thread_context_32.tpidr = static_cast<u32>(value);
    }

    [[nodiscard]] ThreadContext32& GetContext32() {
        return thread_context_32;
    }

    [[nodiscard]] const ThreadContext32& GetContext32() const {
        return thread_context_32;
    }

    [[nodiscard]] ThreadContext64& GetContext64() {
        return thread_context_64;
    }

    [[nodiscard]] const ThreadContext64& GetContext64() const {
        return thread_context_64;
    }

    [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();

    [[nodiscard]] ThreadState GetState() const {
        return thread_state & ThreadState::Mask;
    }

    [[nodiscard]] ThreadState GetRawState() const {
        return thread_state;
    }

    void SetState(ThreadState state);

    [[nodiscard]] s64 GetLastScheduledTick() const {
        return last_scheduled_tick;
    }

    void SetLastScheduledTick(s64 tick) {
        last_scheduled_tick = tick;
    }

    void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) {
        cpu_time += amount;
        // TODO(bunnei): Debug kernels track per-core tick counts. Should we?
    }

    [[nodiscard]] s64 GetCpuTime() const {
        return cpu_time;
    }

    [[nodiscard]] s32 GetActiveCore() const {
        return core_id;
    }

    void SetActiveCore(s32 core) {
        core_id = core;
    }

    [[nodiscard]] s32 GetCurrentCore() const {
        return current_core_id;
    }

    void SetCurrentCore(s32 core) {
        current_core_id = core;
    }

    [[nodiscard]] Process* GetOwnerProcess() {
        return parent;
    }

    [[nodiscard]] const Process* GetOwnerProcess() const {
        return parent;
    }

    [[nodiscard]] bool IsUserThread() const {
        return parent != nullptr;
    }

    [[nodiscard]] KThread* GetLockOwner() const {
        return lock_owner;
    }

    void SetLockOwner(KThread* owner) {
        lock_owner = owner;
    }

    [[nodiscard]] const KAffinityMask& GetAffinityMask() const {
        return physical_affinity_mask;
    }

    [[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);

    [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);

    [[nodiscard]] ResultCode SetCoreMask(s32 core_id, u64 v_affinity_mask);

    [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);

    [[nodiscard]] ResultCode Sleep(s64 timeout);

    [[nodiscard]] s64 GetYieldScheduleCount() const {
        return schedule_count;
    }

    void SetYieldScheduleCount(s64 count) {
        schedule_count = count;
    }

    void WaitCancel();

    [[nodiscard]] bool IsWaitCancelled() const {
        return wait_cancelled;
    }

    [[nodiscard]] void ClearWaitCancelled() {
        wait_cancelled = false;
    }

    [[nodiscard]] bool IsCancellable() const {
        return cancellable;
    }

    void SetCancellable() {
        cancellable = true;
    }

    void ClearCancellable() {
        cancellable = false;
    }

    [[nodiscard]] bool IsTerminationRequested() const {
        return termination_requested || GetRawState() == ThreadState::Terminated;
    }

    struct StackParameters {
        u8 svc_permission[0x10];
        std::atomic<u8> dpc_flags;
        u8 current_svc_id;
        bool is_calling_svc;
        bool is_in_exception_handler;
        bool is_pinned;
        s32 disable_count;
        KThread* cur_thread;
    };

    [[nodiscard]] StackParameters& GetStackParameters() {
        return stack_parameters;
    }

    [[nodiscard]] const StackParameters& GetStackParameters() const {
        return stack_parameters;
    }

    class QueueEntry {
    public:
        constexpr QueueEntry() = default;

        constexpr void Initialize() {
            prev = nullptr;
            next = nullptr;
        }

        constexpr KThread* GetPrev() const {
            return prev;
        }
        constexpr KThread* GetNext() const {
            return next;
        }
        constexpr void SetPrev(KThread* thread) {
            prev = thread;
        }
        constexpr void SetNext(KThread* thread) {
            next = thread;
        }

    private:
        KThread* prev{};
        KThread* next{};
    };

    [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) {
        return per_core_priority_queue_entry[core];
    }

    [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const {
        return per_core_priority_queue_entry[core];
    }

    void SetSleepingQueue(KThreadQueue* q) {
        sleeping_queue = q;
    }

    [[nodiscard]] s32 GetDisableDispatchCount() const {
        return this->GetStackParameters().disable_count;
    }

    void DisableDispatch() {
        ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
        this->GetStackParameters().disable_count++;
    }

    void EnableDispatch() {
        ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
        this->GetStackParameters().disable_count--;
    }

    void Pin();

    void Unpin();

    void SetInExceptionHandler() {
        this->GetStackParameters().is_in_exception_handler = true;
    }

    void ClearInExceptionHandler() {
        this->GetStackParameters().is_in_exception_handler = false;
    }

    [[nodiscard]] bool IsInExceptionHandler() const {
        return this->GetStackParameters().is_in_exception_handler;
    }

    void SetIsCallingSvc() {
        this->GetStackParameters().is_calling_svc = true;
    }

    void ClearIsCallingSvc() {
        this->GetStackParameters().is_calling_svc = false;
    }

    [[nodiscard]] bool IsCallingSvc() const {
        return this->GetStackParameters().is_calling_svc;
    }

    [[nodiscard]] u8 GetSvcId() const {
        return this->GetStackParameters().current_svc_id;
    }

    void RegisterDpc(DpcFlag flag) {
        this->GetStackParameters().dpc_flags |= static_cast<u8>(flag);
    }

    void ClearDpc(DpcFlag flag) {
        this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
    }

    [[nodiscard]] u8 GetDpc() const {
        return this->GetStackParameters().dpc_flags;
    }

    [[nodiscard]] bool HasDpc() const {
        return this->GetDpc() != 0;
    }

    void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
        wait_reason_for_debugging = reason;
    }

    [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
        return wait_reason_for_debugging;
    }

    [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
        return thread_type_for_debugging;
    }

    void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
        wait_objects_for_debugging.clear();
        wait_objects_for_debugging.reserve(objects.size());
        for (const auto& object : objects) {
            wait_objects_for_debugging.emplace_back(object);
        }
    }

    [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
        return wait_objects_for_debugging;
    }

    void SetMutexWaitAddressForDebugging(VAddr address) {
        mutex_wait_address_for_debugging = address;
    }

    [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
        return mutex_wait_address_for_debugging;
    }

    [[nodiscard]] s32 GetIdealCoreForDebugging() const {
        return virtual_ideal_core_id;
    }

    void AddWaiter(KThread* thread);

    void RemoveWaiter(KThread* thread);

    [[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);

    [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);

    [[nodiscard]] VAddr GetAddressKey() const {
        return address_key;
    }

    [[nodiscard]] u32 GetAddressKeyValue() const {
        return address_key_value;
    }

    void SetAddressKey(VAddr key) {
        address_key = key;
    }

    void SetAddressKey(VAddr key, u32 val) {
        address_key = key;
        address_key_value = val;
    }

    [[nodiscard]] bool HasWaiters() const {
        return !waiter_list.empty();
    }

    [[nodiscard]] s32 GetNumKernelWaiters() const {
        return num_kernel_waiters;
    }

    [[nodiscard]] u64 GetConditionVariableKey() const {
        return condvar_key;
    }

    [[nodiscard]] u64 GetAddressArbiterKey() const {
        return condvar_key;
    }

private:
    static constexpr size_t PriorityInheritanceCountMax = 10;
    union SyncObjectBuffer {
        std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
        std::array<Handle,
                   Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
            handles;
        constexpr SyncObjectBuffer() {}
    };
    static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));

    struct ConditionVariableComparator {
        struct LightCompareType {
            u64 cv_key{};
            s32 priority{};

            [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
                return cv_key;
            }

            [[nodiscard]] constexpr s32 GetPriority() const {
                return priority;
            }
        };

        template <typename T>
        requires(
            std::same_as<T, KThread> ||
            std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
                                                                            const KThread& rhs) {
            const u64 l_key = lhs.GetConditionVariableKey();
            const u64 r_key = rhs.GetConditionVariableKey();

            if (l_key < r_key) {
                // Sort first by key
                return -1;
            } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
                // And then by priority.
                return -1;
            } else {
                return 1;
            }
        }
    };

    void AddWaiterImpl(KThread* thread);

    void RemoveWaiterImpl(KThread* thread);

    void StartTermination();

    [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
                                        s32 prio, s32 virt_core, Process* owner, ThreadType type);

    [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
                                                     uintptr_t arg, VAddr user_stack_top, s32 prio,
                                                     s32 core, Process* owner, ThreadType type);

    static void RestorePriority(KernelCore& kernel, KThread* thread);

    // For core KThread implementation
    ThreadContext32 thread_context_32{};
    ThreadContext64 thread_context_64{};
    Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
    s32 priority{};
    using ConditionVariableThreadTreeTraits =
        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
            &KThread::condvar_arbiter_tree_node>;
    using ConditionVariableThreadTree =
        ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
    ConditionVariableThreadTree* condvar_tree{};
    u64 condvar_key{};
    u64 virtual_affinity_mask{};
    KAffinityMask physical_affinity_mask{};
    u64 thread_id{};
    std::atomic<s64> cpu_time{};
    KSynchronizationObject* synced_object{};
    VAddr address_key{};
    Process* parent{};
    VAddr kernel_stack_top{};
    u32* light_ipc_data{};
    VAddr tls_address{};
    KLightLock activity_pause_lock;
    s64 schedule_count{};
    s64 last_scheduled_tick{};
    std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
    KThreadQueue* sleeping_queue{};
    WaiterList waiter_list{};
    WaiterList pinned_waiter_list{};
    KThread* lock_owner{};
    u32 address_key_value{};
    u32 suspend_request_flags{};
    u32 suspend_allowed_flags{};
    ResultCode wait_result{RESULT_SUCCESS};
    s32 base_priority{};
    s32 physical_ideal_core_id{};
    s32 virtual_ideal_core_id{};
    s32 num_kernel_waiters{};
    s32 current_core_id{};
    s32 core_id{};
    KAffinityMask original_physical_affinity_mask{};
    s32 original_physical_ideal_core_id{};
    s32 num_core_migration_disables{};
    ThreadState thread_state{};
    std::atomic<bool> termination_requested{};
    bool wait_cancelled{};
    bool cancellable{};
    bool signaled{};
    bool initialized{};
    bool debug_attached{};
    s8 priority_inheritance_count{};
    bool resource_limit_release_hint{};
    StackParameters stack_parameters{};
    Common::SpinLock context_guard{};

    // For emulation
    std::shared_ptr<Common::Fiber> host_context{};

    // For debugging
    std::vector<KSynchronizationObject*> wait_objects_for_debugging;
    VAddr mutex_wait_address_for_debugging{};
    ThreadWaitReasonForDebugging wait_reason_for_debugging{};
    ThreadType thread_type_for_debugging{};
    std::string name;

public:
    using ConditionVariableThreadTreeType = ConditionVariableThreadTree;

    void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
                              u32 value) {
        condvar_tree = tree;
        condvar_key = cv_key;
        address_key = address;
        address_key_value = value;
    }

    void ClearConditionVariable() {
        condvar_tree = nullptr;
    }

    [[nodiscard]] bool IsWaitingForConditionVariable() const {
        return condvar_tree != nullptr;
    }

    void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
        condvar_tree = tree;
        condvar_key = address;
    }

    void ClearAddressArbiter() {
        condvar_tree = nullptr;
    }

    [[nodiscard]] bool IsWaitingForAddressArbiter() const {
        return condvar_tree != nullptr;
    }

    [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
        return condvar_tree;
    }
};

} // namespace Kernel
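One detail worth spelling out from the header above: ThreadState packs the scheduling state into the low four bits and one suspend flag per SuspendType into bits 4 through 8, which is why GetState() masks thread_state with ThreadState::Mask while IsSuspendRequested() shifts by SuspendShift. A few illustrative static_asserts (not part of the change) that pin down the layout:

    using ST = Kernel::ThreadState;

    // The low nibble carries the scheduling state...
    static_assert((static_cast<u16>(ST::Waiting) & static_cast<u16>(ST::Mask)) == 1);
    // ...and each SuspendType selects one flag bit above it.
    static_assert(static_cast<u16>(ST::DebugSuspended) ==
                  1u << (static_cast<u32>(Kernel::SuspendType::Debug) +
                         static_cast<u32>(ST::SuspendShift)));
    // Together the five suspend flags occupy bits 4..8.
    static_assert(static_cast<u16>(ST::SuspendFlagMask) == 0x1F0);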
src/core/hle/kernel/k_thread_queue.h (81 lines, new file)
@@ -0,0 +1,81 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "core/hle/kernel/k_thread.h"

namespace Kernel {

class KThreadQueue {
public:
    explicit KThreadQueue(KernelCore& kernel) : kernel{kernel} {}

    bool IsEmpty() const {
        return wait_list.empty();
    }

    KThread::WaiterList::iterator begin() {
        return wait_list.begin();
    }
    KThread::WaiterList::iterator end() {
        return wait_list.end();
    }

    bool SleepThread(KThread* t) {
        KScopedSchedulerLock sl{kernel};

        // If the thread needs terminating, don't enqueue it.
        if (t->IsTerminationRequested()) {
            return false;
        }

        // Set the thread's queue and mark it as waiting.
        t->SetSleepingQueue(this);
        t->SetState(ThreadState::Waiting);

        // Add the thread to the queue.
        wait_list.push_back(*t);

        return true;
    }

    void WakeupThread(KThread* t) {
        KScopedSchedulerLock sl{kernel};

        // Remove the thread from the queue.
        wait_list.erase(wait_list.iterator_to(*t));

        // Mark the thread as no longer sleeping.
        t->SetState(ThreadState::Runnable);
        t->SetSleepingQueue(nullptr);
    }

    KThread* WakeupFrontThread() {
        KScopedSchedulerLock sl{kernel};

        if (wait_list.empty()) {
            return nullptr;
        } else {
            // Remove the thread from the queue.
            auto it = wait_list.begin();
            KThread* thread = std::addressof(*it);
            wait_list.erase(it);

            ASSERT(thread->GetState() == ThreadState::Waiting);

            // Mark the thread as no longer sleeping.
            thread->SetState(ThreadState::Runnable);
            thread->SetSleepingQueue(nullptr);

            return thread;
        }
    }

private:
    KernelCore& kernel;
    KThread::WaiterList wait_list{};
};

} // namespace Kernel
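KThreadQueue composes with the pieces above: a waiter parks itself with SleepThread(), which refuses threads that are already being torn down, and a signaller later pops the oldest waiter with WakeupFrontThread(). A hedged sketch of that handshake (the queue placement and the chosen result code are assumptions for illustration):

    // Waiter side: enqueue the current thread under the scheduler lock.
    if (!queue.SleepThread(GetCurrentThreadPointer(kernel))) {
        return Svc::ResultTerminationRequested;
    }

    // Signaller side: wake the thread at the front of the queue, if any.
    if (KThread* woken = queue.WakeupFrontThread()) {
        // `woken` is Runnable again and detached from the queue.
    }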
Some files were not shown because too many files have changed in this diff.