Compare commits

116 Commits

| SHA1 |
|---|
| d3cb9201f1 |
| 3f3c2dc20f |
| 78ce053b4d |
| 2c1e2c63c3 |
| 603952bc27 |
| 3196d957b0 |
| 4ef66ec8fb |
| d41ffb592c |
| 01bc0c84f0 |
| 2575a93dc6 |
| f5c1d7b8c8 |
| 86ccce3721 |
| 38e4a144a1 |
| 9cafb0d912 |
| 00b09de3d9 |
| a2d29412cb |
| 846c994cc9 |
| 096366ead5 |
| c78f6d4f20 |
| c34a95fa25 |
| b5d6194f6d |
| a5e419535f |
| 9775fae4eb |
| a262dc02b5 |
| fca5752690 |
| 7b48e7b363 |
| a7d9be1384 |
| abfd690601 |
| bf7e78795f |
| a14438d013 |
| 48737a4bb2 |
| b321c39371 |
| 19f475fd70 |
| 2c56e94702 |
| 95b844dbae |
| 9da4e62573 |
| 1c8f6ba18f |
| ab0e71d7cb |
| 1fd194141a |
| 737c446fc1 |
| 73e13aa090 |
| 0d5792cc57 |
| f37b2e6f10 |
| 24d7aaf43c |
| 5b2b15091f |
| c42fde2a37 |
| fef3d8acb5 |
| e56410b404 |
| a6371fb69d |
| a33e7c13fa |
| 945f3222ae |
| 9e384ed54b |
| 561f5c9c14 |
| cf7e4bda92 |
| 208ed712f4 |
| d1f2f5f146 |
| 744a208763 |
| f86b770ff7 |
| 0ae4eae9a6 |
| 25429998e3 |
| 5ace5c1b7a |
| 23514388ed |
| f117351783 |
| 4572634a4e |
| 103997ee56 |
| c9de5474bf |
| a7358ff1d4 |
| 20eab9fed9 |
| 7620e1a631 |
| 0eeee431dc |
| 888f499188 |
| c6e7ca562a |
| a9b4dd022c |
| 5568763a57 |
| a3b12e3809 |
| 742f021fdf |
| 95bcf6ac38 |
| e371961219 |
| 5503338f21 |
| fe7184c2a8 |
| 1c83014526 |
| 2d903e3ce6 |
| e29e8eec2f |
| dc47d0f624 |
| 8b55f2c615 |
| fcfe192e83 |
| bd38aefc57 |
| feaf010fa2 |
| ebecdd3a74 |
| a29ddcee40 |
| d11547024c |
| 6f59e2676b |
| 8fea7e56e5 |
| 58fea44eb5 |
| 084d7d6b01 |
| bd3bfe411d |
| 963ed37fd6 |
| 741da9c8bf |
| 69d92a19a5 |
| 8671aa8dd0 |
| efc89c032b |
| d0328f49f1 |
| c1bd602e4c |
| b3d6f7bdd8 |
| 12156b199a |
| a0407a8e64 |
| 7582717c9d |
| ec85eac3c9 |
| fb4b507ba4 |
| c3cc65a11e |
| 1f0fee33ed |
| de6c0defb3 |
| 6c659c3a16 |
| af022294dd |
| 28877cea31 |
| cc6a4bedfc |

(Author and date columns were not captured for these commits.)
@@ -1,12 +1,27 @@
 #!/bin/bash -ex

 set -e

 cd /yuzu

 ccache -s

 mkdir build || true && cd build
-cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWCross.cmake" -DUSE_CCACHE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_QT_TRANSLATION=ON
-ninja
+LDFLAGS="-fuse-ld=lld"
+# -femulated-tls required due to an incompatibility between GCC and Clang
+# TODO(lat9nq): If this is widespread, we probably need to add this to CMakeLists where appropriate
+cmake .. \
+    -DCMAKE_BUILD_TYPE=Release \
+    -DCMAKE_CXX_FLAGS="-femulated-tls" \
+    -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWClangCross.cmake" \
+    -DDISPLAY_VERSION=$1 \
+    -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON \
+    -DENABLE_QT_TRANSLATION=ON \
+    -DUSE_CCACHE=ON \
+    -DYUZU_USE_BUNDLED_SDL2=OFF \
+    -DYUZU_USE_EXTERNAL_SDL2=OFF \
+    -GNinja
+ninja yuzu yuzu-cmd

 ccache -s
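Note: `-fuse-ld=lld` and `-femulated-tls` only change how the Clang cross toolchain links and how `thread_local` storage is lowered; no source changes are needed. A minimal illustration of the kind of declaration the TLS flag affects (hypothetical code, not from this PR):

```cpp
#include <cstdio>

// Under -femulated-tls, accesses to this variable go through the emutls
// runtime helpers instead of native TLS relocations; behavior is identical.
thread_local int tls_counter = 0;

int main() {
    ++tls_counter;
    std::printf("tls_counter = %d\n", tls_counter);
}
```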
CMakeModules/MinGWClangCross.cmake (new file, 55 lines)
@@ -0,0 +1,55 @@
+set(MINGW_PREFIX /usr/x86_64-w64-mingw32/)
+set(CMAKE_SYSTEM_NAME Windows)
+set(CMAKE_SYSTEM_PROCESSOR x86_64)
+
+set(CMAKE_FIND_ROOT_PATH ${MINGW_PREFIX})
+set(SDL2_PATH ${MINGW_PREFIX})
+set(MINGW_TOOL_PREFIX ${CMAKE_SYSTEM_PROCESSOR}-w64-mingw32-)
+
+# Specify the cross compiler
+set(CMAKE_C_COMPILER ${MINGW_TOOL_PREFIX}clang)
+set(CMAKE_CXX_COMPILER ${MINGW_TOOL_PREFIX}clang++)
+set(CMAKE_RC_COMPILER ${MINGW_TOOL_PREFIX}windres)
+set(CMAKE_C_COMPILER_AR ${MINGW_TOOL_PREFIX}ar)
+set(CMAKE_CXX_COMPILER_AR ${MINGW_TOOL_PREFIX}ar)
+set(CMAKE_C_COMPILER_RANLIB ${MINGW_TOOL_PREFIX}ranlib)
+set(CMAKE_CXX_COMPILER_RANLIB ${MINGW_TOOL_PREFIX}ranlib)
+
+# Mingw tools
+set(STRIP ${MINGW_TOOL_PREFIX}strip)
+set(WINDRES ${MINGW_TOOL_PREFIX}windres)
+set(ENV{PKG_CONFIG} ${MINGW_TOOL_PREFIX}pkg-config)
+
+# ccache wrapper
+option(USE_CCACHE "Use ccache for compilation" OFF)
+if(USE_CCACHE)
+    find_program(CCACHE ccache)
+    if(CCACHE)
+        message(STATUS "Using ccache found in PATH")
+        set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE})
+        set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE})
+    else(CCACHE)
+        message(WARNING "USE_CCACHE enabled, but no ccache found")
+    endif(CCACHE)
+endif(USE_CCACHE)
+
+# Search for programs in the build host directories
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+
+
+# Echo modified cmake vars to screen for debugging purposes
+if(NOT DEFINED ENV{MINGW_DEBUG_INFO})
+    message("")
+    message("Custom cmake vars: (blank = system default)")
+    message("-----------------------------------------")
+    message("* CMAKE_C_COMPILER : ${CMAKE_C_COMPILER}")
+    message("* CMAKE_CXX_COMPILER : ${CMAKE_CXX_COMPILER}")
+    message("* CMAKE_RC_COMPILER : ${CMAKE_RC_COMPILER}")
+    message("* WINDRES : ${WINDRES}")
+    message("* ENV{PKG_CONFIG} : $ENV{PKG_CONFIG}")
+    message("* STRIP : ${STRIP}")
+    message("* USE_CCACHE : ${USE_CCACHE}")
+    message("")
+    # So that the debug info only appears once
+    set(ENV{MINGW_DEBUG_INFO} SHOWN)
+endif()
externals/CMakeLists.txt (vendored, 5 changes)
@@ -40,6 +40,11 @@ target_include_directories(mbedtls PUBLIC ./mbedtls/include)
 add_library(microprofile INTERFACE)
 target_include_directories(microprofile INTERFACE ./microprofile)

+# GCC bugs
+if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "12" AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND MINGW)
+    target_compile_options(microprofile INTERFACE "-Wno-array-bounds")
+endif()
+
 # libusb
 if (NOT LIBUSB_FOUND OR YUZU_USE_BUNDLED_LIBUSB)
     add_subdirectory(libusb)
externals/cpp-httplib (vendored)
Submodule externals/cpp-httplib updated: 9648f950f5...305a7abcb9
externals/dynarmic (vendored)
Submodule externals/dynarmic updated: 57af72a567...7f84870712
externals/microprofile/microprofile.h (vendored, 2 changes)
@@ -1246,7 +1246,7 @@ struct MicroProfileScopeLock
 {
     bool bUseLock;
     std::recursive_mutex& m;
-    MicroProfileScopeLock(std::recursive_mutex& m) : bUseLock(g_bUseLock), m(m)
+    MicroProfileScopeLock(std::recursive_mutex& m_) : bUseLock(g_bUseLock), m(m_)
     {
         if(bUseLock)
             m.lock();
externals/microprofile/microprofileui.h (vendored, 75 changes)
@@ -213,8 +213,8 @@ struct MicroProfileCustom

 struct SOptionDesc
 {
-    SOptionDesc(){}
-    SOptionDesc(uint8_t nSubType, uint8_t nIndex, const char* fmt, ...):nSubType(nSubType), nIndex(nIndex)
+    SOptionDesc()=default;
+    SOptionDesc(uint8_t nSubType_, uint8_t nIndex_, const char* fmt, ...):nSubType(nSubType_), nIndex(nIndex_)
     {
         va_list args;
         va_start (args, fmt);

@@ -573,10 +573,10 @@ inline void MicroProfileToolTipMeta(MicroProfileStringArray* pToolTip)
         }
         else
         {
-            for(int i = 0; i < MICROPROFILE_META_MAX; ++i)
+            for(int k = 0; k < MICROPROFILE_META_MAX; ++k)
             {
-                nMetaSumInclusive[i] += nMetaSum[i];
-                nMetaSum[i] = 0;
+                nMetaSumInclusive[k] += nMetaSum[k];
+                nMetaSum[k] = 0;
             }
         }
         break;

@@ -708,10 +708,10 @@ inline void MicroProfileDrawFloatTooltip(uint32_t nX, uint32_t nY, uint32_t nTok

     if(UI.nMouseLeftMod)
     {
-        int nIndex = (g_MicroProfileUI.LockedToolTipFront + MICROPROFILE_TOOLTIP_MAX_LOCKED - 1) % MICROPROFILE_TOOLTIP_MAX_LOCKED;
-        g_MicroProfileUI.nLockedToolTipColor[nIndex] = S.TimerInfo[nTimerId].nColor;
-        MicroProfileStringArrayCopy(&g_MicroProfileUI.LockedToolTips[nIndex], &ToolTip);
-        g_MicroProfileUI.LockedToolTipFront = nIndex;
+        int nToolTipIndex = (g_MicroProfileUI.LockedToolTipFront + MICROPROFILE_TOOLTIP_MAX_LOCKED - 1) % MICROPROFILE_TOOLTIP_MAX_LOCKED;
+        g_MicroProfileUI.nLockedToolTipColor[nToolTipIndex] = S.TimerInfo[nTimerId].nColor;
+        MicroProfileStringArrayCopy(&g_MicroProfileUI.LockedToolTips[nToolTipIndex], &ToolTip);
+        g_MicroProfileUI.LockedToolTipFront = nToolTipIndex;

     }
 }

@@ -917,9 +917,8 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
     float fStart = floor(fMsBase*fRcpStep) * fStep;
     for(float f = fStart; f < fMsEnd; )
     {
-        float fStart = f;
         float fNext = f + fStep;
-        MicroProfileDrawBox(((fStart-fMsBase) * fMsToScreen), nBaseY, (fNext-fMsBase) * fMsToScreen+1, nBaseY + nHeight, UI.nOpacityBackground | g_nMicroProfileBackColors[nColorIndex++ & 1]);
+        MicroProfileDrawBox(((f-fMsBase) * fMsToScreen), nBaseY, (fNext-fMsBase) * fMsToScreen+1, nBaseY + nHeight, UI.nOpacityBackground | g_nMicroProfileBackColors[nColorIndex++ & 1]);
         f = fNext;
     }
 }

@@ -1116,9 +1115,9 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int

     nMaxStackDepth = MicroProfileMax(nMaxStackDepth, nStackPos);
     float fMsStart = fToMs * MicroProfileLogTickDifference(nBaseTicks, nTickStart);
-    float fMsEnd = fToMs * MicroProfileLogTickDifference(nBaseTicks, nTickEnd);
+    float fMsEnd2 = fToMs * MicroProfileLogTickDifference(nBaseTicks, nTickEnd);
     float fXStart = fMsStart * fMsToScreen;
-    float fXEnd = fMsEnd * fMsToScreen;
+    float fXEnd = fMsEnd2 * fMsToScreen;
     float fYStart = (float)(nY + nStackPos * nYDelta);
     float fYEnd = fYStart + (MICROPROFILE_DETAILED_BAR_HEIGHT);
     float fXDist = MicroProfileMax(fXStart - fMouseX, fMouseX - fXEnd);

@@ -1269,22 +1268,22 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
     if(UI.nRangeBegin != UI.nRangeEnd)
     {
         float fMsStart = fToMsCpu * MicroProfileLogTickDifference(nBaseTicksCpu, UI.nRangeBegin);
-        float fMsEnd = fToMsCpu * MicroProfileLogTickDifference(nBaseTicksCpu, UI.nRangeEnd);
+        float fMsEnd3 = fToMsCpu * MicroProfileLogTickDifference(nBaseTicksCpu, UI.nRangeEnd);
         float fXStart = fMsStart * fMsToScreen;
-        float fXEnd = fMsEnd * fMsToScreen;
+        float fXEnd = fMsEnd3 * fMsToScreen;
         MicroProfileDrawBox(fXStart, nBaseY, fXEnd, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT, MicroProfileBoxTypeFlat);
         MicroProfileDrawLineVertical(fXStart, nBaseY, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT | 0x44000000);
         MicroProfileDrawLineVertical(fXEnd, nBaseY, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT | 0x44000000);

         fMsStart += fDetailedOffset;
-        fMsEnd += fDetailedOffset;
+        fMsEnd3 += fDetailedOffset;
         char sBuffer[32];
         uint32_t nLenStart = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsStart);
         float fStartTextWidth = (float)((1+MICROPROFILE_TEXT_WIDTH) * nLenStart);
         float fStartTextX = fXStart - fStartTextWidth - 2;
         MicroProfileDrawBox(fStartTextX, nBaseY, fStartTextX + fStartTextWidth + 2, MICROPROFILE_TEXT_HEIGHT + 2 + nBaseY, 0x33000000, MicroProfileBoxTypeFlat);
         MicroProfileDrawText(fStartTextX+1, nBaseY, UINT32_MAX, sBuffer, nLenStart);
-        uint32_t nLenEnd = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsEnd);
+        uint32_t nLenEnd = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsEnd3);
         MicroProfileDrawBox(fXEnd+1, nBaseY, fXEnd+1+(1+MICROPROFILE_TEXT_WIDTH) * nLenEnd + 3, MICROPROFILE_TEXT_HEIGHT + 2 + nBaseY, 0x33000000, MicroProfileBoxTypeFlat);
         MicroProfileDrawText(fXEnd+2, nBaseY+1, UINT32_MAX, sBuffer, nLenEnd);

@@ -1297,9 +1296,9 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
     if(UI.nRangeBeginGpu != UI.nRangeEndGpu)
     {
         float fMsStart = fToMsGpu * MicroProfileLogTickDifference(nBaseTicksGpu, UI.nRangeBeginGpu);
-        float fMsEnd = fToMsGpu * MicroProfileLogTickDifference(nBaseTicksGpu, UI.nRangeEndGpu);
+        float fMsEnd4 = fToMsGpu * MicroProfileLogTickDifference(nBaseTicksGpu, UI.nRangeEndGpu);
         float fXStart = fMsStart * fMsToScreen;
-        float fXEnd = fMsEnd * fMsToScreen;
+        float fXEnd = fMsEnd4 * fMsToScreen;
         MicroProfileDrawBox(fXStart, nBaseY, fXEnd, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT_GPU, MicroProfileBoxTypeFlat);
         MicroProfileDrawLineVertical(fXStart, nBaseY, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT_GPU | 0x44000000);
         MicroProfileDrawLineVertical(fXEnd, nBaseY, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT_GPU | 0x44000000);

@@ -1307,14 +1306,14 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
         nBaseY += MICROPROFILE_TEXT_HEIGHT+1;

         fMsStart += fDetailedOffset;
-        fMsEnd += fDetailedOffset;
+        fMsEnd4 += fDetailedOffset;
         char sBuffer[32];
         uint32_t nLenStart = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsStart);
         float fStartTextWidth = (float)((1+MICROPROFILE_TEXT_WIDTH) * nLenStart);
         float fStartTextX = fXStart - fStartTextWidth - 2;
         MicroProfileDrawBox(fStartTextX, nBaseY, fStartTextX + fStartTextWidth + 2, MICROPROFILE_TEXT_HEIGHT + 2 + nBaseY, 0x33000000, MicroProfileBoxTypeFlat);
         MicroProfileDrawText(fStartTextX+1, nBaseY, UINT32_MAX, sBuffer, nLenStart);
-        uint32_t nLenEnd = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsEnd);
+        uint32_t nLenEnd = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsEnd4);
         MicroProfileDrawBox(fXEnd+1, nBaseY, fXEnd+1+(1+MICROPROFILE_TEXT_WIDTH) * nLenEnd + 3, MICROPROFILE_TEXT_HEIGHT + 2 + nBaseY, 0x33000000, MicroProfileBoxTypeFlat);
         MicroProfileDrawText(fXEnd+2, nBaseY+1, UINT32_MAX, sBuffer, nLenEnd);
     }

@@ -1716,8 +1715,8 @@ bool MicroProfileDrawGraph(uint32_t nScreenWidth, uint32_t nScreenHeight)
     uint32_t nTextCount = 0;
     uint32_t nGraphIndex = (S.nGraphPut + MICROPROFILE_GRAPH_HISTORY - int(MICROPROFILE_GRAPH_HISTORY*(1.f - fMouseXPrc))) % MICROPROFILE_GRAPH_HISTORY;

-    uint32_t nX = UI.nMouseX;
-    uint32_t nY = UI.nMouseY + 20;
+    uint32_t nMouseX = UI.nMouseX;
+    uint32_t nMouseY = UI.nMouseY + 20;

     for(uint32_t i = 0; i < MICROPROFILE_MAX_GRAPHS; ++i)
     {

@@ -1736,7 +1735,7 @@ bool MicroProfileDrawGraph(uint32_t nScreenWidth, uint32_t nScreenHeight)
     }
     if(nTextCount)
     {
-        MicroProfileDrawFloatWindow(nX, nY, Strings.ppStrings, Strings.nNumStrings, 0, pColors);
+        MicroProfileDrawFloatWindow(nMouseX, nMouseY, Strings.ppStrings, Strings.nNumStrings, 0, pColors);
     }

     if(UI.nMouseRight)

@@ -2321,8 +2320,8 @@ inline void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
     uint32_t nMenuX[MICROPROFILE_MENU_MAX] = {0};
     uint32_t nNumMenuItems = 0;

-    int nLen = snprintf(buffer, 127, "MicroProfile");
-    MicroProfileDrawText(nX, nY, UINT32_MAX, buffer, nLen);
+    int nMPTextLen = snprintf(buffer, 127, "MicroProfile");
+    MicroProfileDrawText(nX, nY, UINT32_MAX, buffer, nMPTextLen);
     nX += (sizeof("MicroProfile")+2) * (MICROPROFILE_TEXT_WIDTH+1);
     pMenuText[nNumMenuItems++] = "Mode";
     pMenuText[nNumMenuItems++] = "Groups";

@@ -2438,16 +2437,16 @@ inline void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
     int nNumLines = 0;
     bool bSelected = false;
     const char* pString = CB(nNumLines, &bSelected);
-    uint32_t nWidth = 0, nHeight = 0;
+    uint32_t nTextWidth = 0, nTextHeight = 0;
     while(pString)
     {
-        nWidth = MicroProfileMax<int>(nWidth, (int)strlen(pString));
+        nTextWidth = MicroProfileMax<int>(nTextWidth, (int)strlen(pString));
         nNumLines++;
         pString = CB(nNumLines, &bSelected);
     }
-    nWidth = (2+nWidth) * (MICROPROFILE_TEXT_WIDTH+1);
-    nHeight = nNumLines * (MICROPROFILE_TEXT_HEIGHT+1);
-    if(UI.nMouseY <= nY + nHeight+0 && UI.nMouseY >= nY-0 && UI.nMouseX <= nX + nWidth + 0 && UI.nMouseX >= nX - 0)
+    nTextWidth = (2+nTextWidth) * (MICROPROFILE_TEXT_WIDTH+1);
+    nTextHeight = nNumLines * (MICROPROFILE_TEXT_HEIGHT+1);
+    if(UI.nMouseY <= nY + nTextHeight+0 && UI.nMouseY >= nY-0 && UI.nMouseX <= nX + nTextWidth + 0 && UI.nMouseX >= nX - 0)
     {
         UI.nActiveMenu = nMenu;
     }

@@ -2455,21 +2454,21 @@ inline void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
     {
         UI.nActiveMenu = UINT32_MAX;
     }
-    MicroProfileDrawBox(nX, nY, nX + nWidth, nY + nHeight, 0xff000000|g_nMicroProfileBackColors[1]);
+    MicroProfileDrawBox(nX, nY, nX + nTextWidth, nY + nTextHeight, 0xff000000|g_nMicroProfileBackColors[1]);
     for(int i = 0; i < nNumLines; ++i)
     {
-        bool bSelected = false;
-        const char* pString = CB(i, &bSelected);
+        bool bSelected2 = false;
+        const char* pString2 = CB(i, &bSelected2);
         if(UI.nMouseY >= nY && UI.nMouseY < nY + MICROPROFILE_TEXT_HEIGHT + 1)
         {
             if(UI.nMouseLeft || UI.nMouseRight)
             {
                 CBClick[nMenu](i);
             }
-            MicroProfileDrawBox(nX, nY, nX + nWidth, nY + MICROPROFILE_TEXT_HEIGHT + 1, 0xff888888);
+            MicroProfileDrawBox(nX, nY, nX + nTextWidth, nY + MICROPROFILE_TEXT_HEIGHT + 1, 0xff888888);
         }
-        int nLen = snprintf(buffer, SBUF_SIZE-1, "%c %s", bSelected ? '*' : ' ' ,pString);
-        MicroProfileDrawText(nX, nY, UINT32_MAX, buffer, nLen);
+        int nTextLen = snprintf(buffer, SBUF_SIZE-1, "%c %s", bSelected2 ? '*' : ' ' ,pString2);
+        MicroProfileDrawText(nX, nY, UINT32_MAX, buffer, nTextLen);
         nY += MICROPROFILE_TEXT_HEIGHT+1;
     }
 }

@@ -2605,7 +2604,7 @@ inline void MicroProfileDrawCustom(uint32_t nWidth, uint32_t nHeight)
     for(uint32_t i = 0; i < nCount; ++i)
     {
         nOffsetY += (1+MICROPROFILE_TEXT_HEIGHT);
-        uint32_t nWidth = MicroProfileMin(nMaxWidth, (uint32_t)(nMaxWidth * pMs[i] * fRcpReference));
+        nWidth = MicroProfileMin(nMaxWidth, (uint32_t)(nMaxWidth * pMs[i] * fRcpReference));
         MicroProfileDrawBox(nMaxOffsetX, nOffsetY, nMaxOffsetX+nWidth, nOffsetY+MICROPROFILE_TEXT_HEIGHT, pColors[i]|0xff000000);
     }
 }
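Nearly all of the microprofileui.h churn above is mechanical renaming to satisfy the shadow warnings this PR promotes to errors (/we4456-4459 on MSVC, -Werror=shadow elsewhere; see the CMake hunks below). A minimal sketch of the class of bug these flags catch (hypothetical code, not from the diff):

```cpp
#include <cstdio>

int main() {
    float fMsEnd = 10.0f;
    if (fMsEnd > 0.0f) {
        // With /we4456 or -Werror=shadow this redeclaration is a hard error,
        // because it silently hides the outer fMsEnd for the rest of the block.
        float fMsEnd = 20.0f;
        std::printf("inner: %f\n", fMsEnd);
    }
    std::printf("outer: %f\n", fMsEnd);
}
```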
@@ -65,6 +65,10 @@ if (MSVC)
         /we4305 # 'context': truncation from 'type1' to 'type2'
         /we4388 # 'expression': signed/unsigned mismatch
         /we4389 # 'operator': signed/unsigned mismatch
+        /we4456 # Declaration of 'identifier' hides previous local declaration
+        /we4457 # Declaration of 'identifier' hides function parameter
+        /we4458 # Declaration of 'identifier' hides class member
+        /we4459 # Declaration of 'identifier' hides global declaration
         /we4505 # 'function': unreferenced local function has been removed
         /we4547 # 'operator': operator before comma has no effect; expected operator with side-effect
         /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?

@@ -92,6 +96,7 @@
         -Werror=missing-declarations
         -Werror=missing-field-initializers
         -Werror=reorder
+        -Werror=shadow
         -Werror=sign-compare
         -Werror=switch
         -Werror=uninitialized

@@ -49,9 +49,6 @@ if (NOT MSVC)
     target_compile_options(audio_core PRIVATE
         -Werror=conversion
         -Werror=ignored-qualifiers
-        -Werror=shadow
-        -Werror=unused-parameter
-        -Werror=unused-variable

         $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
         $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
@@ -98,13 +98,13 @@ AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing_, Core::Memor

 AudioRenderer::~AudioRenderer() = default;

-ResultCode AudioRenderer::Start() {
+Result AudioRenderer::Start() {
     audio_out->StartStream(stream);
     ReleaseAndQueueBuffers();
     return ResultSuccess;
 }

-ResultCode AudioRenderer::Stop() {
+Result AudioRenderer::Stop() {
     audio_out->StopStream(stream);
     return ResultSuccess;
 }

@@ -125,8 +125,8 @@ Stream::State AudioRenderer::GetStreamState() const {
     return stream->GetState();
 }

-ResultCode AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
-                                              std::vector<u8>& output_params) {
+Result AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
+                                          std::vector<u8>& output_params) {
     std::scoped_lock lock{mutex};
     InfoUpdater info_updater{input_params, output_params, behavior_info};

@@ -43,10 +43,10 @@ public:
                   Stream::ReleaseCallback&& release_callback, std::size_t instance_number);
     ~AudioRenderer();

-    [[nodiscard]] ResultCode UpdateAudioRenderer(const std::vector<u8>& input_params,
-                                                 std::vector<u8>& output_params);
-    [[nodiscard]] ResultCode Start();
-    [[nodiscard]] ResultCode Stop();
+    [[nodiscard]] Result UpdateAudioRenderer(const std::vector<u8>& input_params,
+                                             std::vector<u8>& output_params);
+    [[nodiscard]] Result Start();
+    [[nodiscard]] Result Stop();
     void QueueMixedBuffer(Buffer::Tag tag);
     void ReleaseAndQueueBuffers();
     [[nodiscard]] u32 GetSampleRate() const;
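These hunks are part of a tree-wide rename of ResultCode to Result; keeping the functions [[nodiscard]] means a caller cannot silently drop the status. A standalone sketch of the call-site contract (minimal stand-in types, not yuzu's headers):

```cpp
#include <cstdio>

// Minimal stand-ins for yuzu's Result/ResultSuccess, for illustration only.
struct Result {
    int raw;
};
constexpr Result ResultSuccess{0};
constexpr bool operator==(Result a, Result b) { return a.raw == b.raw; }

[[nodiscard]] Result Start() {
    return ResultSuccess;
}

int main() {
    // Start();  // discarding the value would trigger a [[nodiscard]] warning
    const Result rc = Start();
    if (!(rc == ResultSuccess)) {
        std::printf("Start failed\n");
    }
}
```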
@@ -429,7 +429,7 @@ void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, Vo
                           in_params.node_id);
         break;
     default:
-        UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
+        ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
         }
     }
 }

@@ -1312,7 +1312,7 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, std::s
                              samples_to_read - samples_read, channel, temp_mix_offset);
         break;
     default:
-        UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
+        ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
     }

     temp_mix_offset += samples_decoded;

@@ -10,8 +10,8 @@

 namespace AudioCommon {
 namespace Audren {
-constexpr ResultCode ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
-constexpr ResultCode ERR_SPLITTER_SORT_FAILED{ErrorModule::Audio, 43};
+constexpr Result ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
+constexpr Result ERR_SPLITTER_SORT_FAILED{ErrorModule::Audio, 43};
 } // namespace Audren

 constexpr u8 BASE_REVISION = '0';

@@ -50,7 +50,7 @@ EffectBase* EffectContext::RetargetEffect(std::size_t i, EffectType effect) {
         effects[i] = std::make_unique<EffectBiquadFilter>();
         break;
     default:
-        UNREACHABLE_MSG("Unimplemented effect {}", effect);
+        ASSERT_MSG(false, "Unimplemented effect {}", effect);
         effects[i] = std::make_unique<EffectStubbed>();
     }
     return GetInfo(i);

@@ -104,7 +104,7 @@ void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
     auto& params = GetParams();
     const auto* reverb_params = reinterpret_cast<I3dl2ReverbParams*>(in_params.raw.data());
     if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
-        UNREACHABLE_MSG("Invalid reverb max channel count {}", reverb_params->max_channels);
+        ASSERT_MSG(false, "Invalid reverb max channel count {}", reverb_params->max_channels);
         return;
     }

@@ -285,9 +285,8 @@ bool InfoUpdater::UpdateSplitterInfo(SplitterContext& splitter_context) {
     return true;
 }

-ResultCode InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
-                                    SplitterContext& splitter_context,
-                                    EffectContext& effect_context) {
+Result InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
+                                SplitterContext& splitter_context, EffectContext& effect_context) {
     std::vector<MixInfo::InParams> mix_in_params;

     if (!behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {

@@ -32,8 +32,8 @@ public:
                 VAddr audio_codec_dsp_addr);
     bool UpdateEffects(EffectContext& effect_context, bool is_active);
     bool UpdateSplitterInfo(SplitterContext& splitter_context);
-    ResultCode UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
-                           SplitterContext& splitter_context, EffectContext& effect_context);
+    Result UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
+                       SplitterContext& splitter_context, EffectContext& effect_context);
     bool UpdateSinks(SinkContext& sink_context);
     bool UpdatePerformanceBuffer();
     bool UpdateErrorInfo(BehaviorInfo& in_behavior_info);

@@ -483,7 +483,7 @@ bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
             // Add more work
             index_stack.push(j);
         } else if (node_state == NodeStates::State::InFound) {
-            UNREACHABLE_MSG("Node start marked as found");
+            ASSERT_MSG(false, "Node start marked as found");
             ResetState();
             return false;
         }

@@ -114,7 +114,7 @@ void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
         in_params.current_playstate = ServerPlayState::Play;
         break;
     default:
-        UNREACHABLE_MSG("Unknown playstate {}", voice_in.play_state);
+        ASSERT_MSG(false, "Unknown playstate {}", voice_in.play_state);
         break;
     }

@@ -410,7 +410,7 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
         return in_params.should_depop;
     }
     default:
-        UNREACHABLE_MSG("Invalid playstate {}", in_params.current_playstate);
+        ASSERT_MSG(false, "Invalid playstate {}", in_params.current_playstate);
     }

     return false;
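The recurring UNREACHABLE_MSG to ASSERT_MSG(false, ...) swap follows from the assert rework below: UNREACHABLE now genuinely never returns, so default cases that are expected to be survivable assert and continue instead. A standalone sketch of the pattern (simplified printf-style stand-in macro, not yuzu's):

```cpp
#include <cstdio>

// Simplified stand-in for yuzu's fmt-based ASSERT_MSG; the real macro logs
// through LOG_CRITICAL and may Crash() when debug asserts are enabled.
#define ASSERT_MSG(cond, ...) \
    do { \
        if (!(cond)) { \
            std::fprintf(stderr, __VA_ARGS__); \
            std::fputc('\n', stderr); \
        } \
    } while (0)

enum class SampleFormat { Pcm16, Adpcm };

void Decode(SampleFormat fmt) {
    switch (fmt) {
    case SampleFormat::Pcm16:
        // ...decode...
        break;
    default:
        // Recoverable: log loudly and continue rather than declaring the path unreachable.
        ASSERT_MSG(false, "Unimplemented sample format=%d", static_cast<int>(fmt));
        break;
    }
}

int main() {
    Decode(SampleFormat::Adpcm);
}
```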
@@ -6,8 +6,13 @@

 #include "common/settings.h"

-void assert_handle_failure() {
+void assert_fail_impl() {
     if (Settings::values.use_debug_asserts) {
         Crash();
     }
 }
+
+[[noreturn]] void unreachable_impl() {
+    Crash();
+    throw std::runtime_error("Unreachable code");
+}

@@ -9,44 +9,43 @@
 // Sometimes we want to try to continue even after hitting an assert.
 // However touching this file yields a global recompilation as this header is included almost
 // everywhere. So let's just move the handling of the failed assert to a single cpp file.
-void assert_handle_failure();
-
-// For asserts we'd like to keep all the junk executed when an assert happens away from the
-// important code in the function. One way of doing this is to put all the relevant code inside a
-// lambda and force the compiler to not inline it. Unfortunately, MSVC seems to have no syntax to
-// specify __declspec on lambda functions, so what we do instead is define a noinline wrapper
-// template that calls the lambda. This seems to generate an extra instruction at the call-site
-// compared to the ideal implementation (which wouldn't support ASSERT_MSG parameters), but is good
-// enough for our purposes.
-template <typename Fn>
-#if defined(_MSC_VER)
-[[msvc::noinline]]
-#elif defined(__GNUC__)
-[[gnu::cold, gnu::noinline]]
-#endif
-static void
-assert_noinline_call(const Fn& fn) {
-    fn();
-    assert_handle_failure();
-}
+void assert_fail_impl();
+[[noreturn]] void unreachable_impl();
+
+#ifdef _MSC_VER
+#define YUZU_NO_INLINE __declspec(noinline)
+#else
+#define YUZU_NO_INLINE __attribute__((noinline))
+#endif

 #define ASSERT(_a_) \
-    do \
-        if (!(_a_)) { \
-            assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
-        } \
-    while (0)
+    ([&]() YUZU_NO_INLINE { \
+        if (!(_a_)) [[unlikely]] { \
+            LOG_CRITICAL(Debug, "Assertion Failed!"); \
+            assert_fail_impl(); \
+        } \
+    }())

 #define ASSERT_MSG(_a_, ...) \
-    do \
-        if (!(_a_)) { \
-            assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
-        } \
-    while (0)
+    ([&]() YUZU_NO_INLINE { \
+        if (!(_a_)) [[unlikely]] { \
+            LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); \
+            assert_fail_impl(); \
+        } \
+    }())

-#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
+#define UNREACHABLE() \
+    do { \
+        LOG_CRITICAL(Debug, "Unreachable code!"); \
+        unreachable_impl(); \
+    } while (0)
+
 #define UNREACHABLE_MSG(...) \
-    assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
+    do { \
+        LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); \
+        unreachable_impl(); \
+    } while (0)

 #ifdef _DEBUG
 #define DEBUG_ASSERT(_a_) ASSERT(_a_)
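The new macros replace the assert_noinline_call wrapper template with an immediately invoked, explicitly non-inlined lambda, keeping the cold failure path out of the hot code at every call site. A self-contained sketch of the same shape (simplified logging, not yuzu's headers):

```cpp
#include <cstdio>

#ifdef _MSC_VER
#define MY_NO_INLINE __declspec(noinline)
#else
#define MY_NO_INLINE __attribute__((noinline))
#endif

// Same shape as the reworked ASSERT: the check and its failure handling live
// in an immediately invoked lambda that is kept out of line, and the failing
// branch is marked unlikely so the common path stays tight.
#define MY_ASSERT(cond) \
    ([&]() MY_NO_INLINE { \
        if (!(cond)) [[unlikely]] { \
            std::fprintf(stderr, "Assertion Failed! (%s)\n", #cond); \
        } \
    }())

int main() {
    int x = 1;
    MY_ASSERT(x == 1); // passes silently
    MY_ASSERT(x == 2); // logs and continues, like ASSERT with debug asserts off
}
```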
@@ -1,10 +1,7 @@
 // SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
 // SPDX-License-Identifier: MIT

 #pragma once
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4324)
-#endif

 #include <atomic>
 #include <bit>

@@ -12,105 +9,63 @@
 #include <memory>
 #include <mutex>
 #include <new>
-#include <stdexcept>
 #include <stop_token>
 #include <type_traits>
 #include <utility>

 namespace Common {
-namespace mpsc {

 #if defined(__cpp_lib_hardware_interference_size)
 constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
 #else
 constexpr size_t hardware_interference_size = 64;
 #endif

-template <typename T>
-using AlignedAllocator = std::allocator<T>;
-
-template <typename T>
-struct Slot {
-    ~Slot() noexcept {
-        if (turn.test()) {
-            destroy();
-        }
-    }
-
-    template <typename... Args>
-    void construct(Args&&... args) noexcept {
-        static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
-                      "T must be nothrow constructible with Args&&...");
-        std::construct_at(reinterpret_cast<T*>(&storage), std::forward<Args>(args)...);
-    }
-
-    void destroy() noexcept {
-        static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
-        std::destroy_at(reinterpret_cast<T*>(&storage));
-    }
-
-    T&& move() noexcept {
-        return reinterpret_cast<T&&>(storage);
-    }
-
-    // Align to avoid false sharing between adjacent slots
-    alignas(hardware_interference_size) std::atomic_flag turn{};
-    struct aligned_store {
-        struct type {
-            alignas(T) unsigned char data[sizeof(T)];
-        };
-    };
-    typename aligned_store::type storage;
-};
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4324)
+#endif

-template <typename T, typename Allocator = AlignedAllocator<Slot<T>>>
-class Queue {
+template <typename T, size_t capacity = 0x400>
+class MPSCQueue {
 public:
-    explicit Queue(const size_t capacity, const Allocator& allocator = Allocator())
-        : allocator_(allocator) {
-        if (capacity < 1) {
-            throw std::invalid_argument("capacity < 1");
-        }
-        // Ensure that the queue length is an integer power of 2
-        // This is so that idx(i) can be a simple i & mask_ insted of i % capacity
-        // https://github.com/rigtorp/MPMCQueue/pull/36
-        if (!std::has_single_bit(capacity)) {
-            throw std::invalid_argument("capacity must be an integer power of 2");
-        }
-
-        mask_ = capacity - 1;
-
+    explicit MPSCQueue() : allocator{std::allocator<Slot<T>>()} {
         // Allocate one extra slot to prevent false sharing on the last slot
-        slots_ = allocator_.allocate(mask_ + 2);
+        slots = allocator.allocate(capacity + 1);
         // Allocators are not required to honor alignment for over-aligned types
         // (see http://eel.is/c++draft/allocator.requirements#10) so we verify
         // alignment here
-        if (reinterpret_cast<uintptr_t>(slots_) % alignof(Slot<T>) != 0) {
-            allocator_.deallocate(slots_, mask_ + 2);
+        if (reinterpret_cast<uintptr_t>(slots) % alignof(Slot<T>) != 0) {
+            allocator.deallocate(slots, capacity + 1);
             throw std::bad_alloc();
         }
-        for (size_t i = 0; i < mask_ + 1; ++i) {
-            std::construct_at(&slots_[i]);
+        for (size_t i = 0; i < capacity; ++i) {
+            std::construct_at(&slots[i]);
         }
+        static_assert(std::has_single_bit(capacity), "capacity must be an integer power of 2");
         static_assert(alignof(Slot<T>) == hardware_interference_size,
                       "Slot must be aligned to cache line boundary to prevent false sharing");
         static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
                       "Slot size must be a multiple of cache line size to prevent "
                       "false sharing between adjacent slots");
-        static_assert(sizeof(Queue) % hardware_interference_size == 0,
+        static_assert(sizeof(MPSCQueue) % hardware_interference_size == 0,
                       "Queue size must be a multiple of cache line size to "
                       "prevent false sharing between adjacent queues");
     }

-    ~Queue() noexcept {
-        for (size_t i = 0; i < mask_ + 1; ++i) {
-            slots_[i].~Slot();
+    ~MPSCQueue() noexcept {
+        for (size_t i = 0; i < capacity; ++i) {
+            std::destroy_at(&slots[i]);
         }
-        allocator_.deallocate(slots_, mask_ + 2);
+        allocator.deallocate(slots, capacity + 1);
     }

-    // non-copyable and non-movable
-    Queue(const Queue&) = delete;
-    Queue& operator=(const Queue&) = delete;
+    // The queue must be both non-copyable and non-movable
+    MPSCQueue(const MPSCQueue&) = delete;
+    MPSCQueue& operator=(const MPSCQueue&) = delete;
+
+    MPSCQueue(MPSCQueue&&) = delete;
+    MPSCQueue& operator=(MPSCQueue&&) = delete;

     void Push(const T& v) noexcept {
         static_assert(std::is_nothrow_copy_constructible_v<T>,

@@ -125,8 +80,8 @@ public:

     void Pop(T& v, std::stop_token stop) noexcept {
         auto const tail = tail_.fetch_add(1);
-        auto& slot = slots_[idx(tail)];
-        if (false == slot.turn.test()) {
+        auto& slot = slots[idx(tail)];
+        if (!slot.turn.test()) {
             std::unique_lock lock{cv_mutex};
             cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
         }

@@ -137,12 +92,46 @@ public:
     }

 private:
+    template <typename U = T>
+    struct Slot {
+        ~Slot() noexcept {
+            if (turn.test()) {
+                destroy();
+            }
+        }
+
+        template <typename... Args>
+        void construct(Args&&... args) noexcept {
+            static_assert(std::is_nothrow_constructible_v<U, Args&&...>,
+                          "T must be nothrow constructible with Args&&...");
+            std::construct_at(reinterpret_cast<U*>(&storage), std::forward<Args>(args)...);
+        }
+
+        void destroy() noexcept {
+            static_assert(std::is_nothrow_destructible_v<U>, "T must be nothrow destructible");
+            std::destroy_at(reinterpret_cast<U*>(&storage));
+        }
+
+        U&& move() noexcept {
+            return reinterpret_cast<U&&>(storage);
+        }
+
+        // Align to avoid false sharing between adjacent slots
+        alignas(hardware_interference_size) std::atomic_flag turn{};
+        struct aligned_store {
+            struct type {
+                alignas(U) unsigned char data[sizeof(U)];
+            };
+        };
+        typename aligned_store::type storage;
+    };
+
     template <typename... Args>
     void emplace(Args&&... args) noexcept {
         static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
                       "T must be nothrow constructible with Args&&...");
         auto const head = head_.fetch_add(1);
-        auto& slot = slots_[idx(head)];
+        auto& slot = slots[idx(head)];
         slot.turn.wait(true);
         slot.construct(std::forward<Args>(args)...);
         slot.turn.test_and_set();

@@ -150,31 +139,29 @@ private:
     }

     constexpr size_t idx(size_t i) const noexcept {
-        return i & mask_;
+        return i & mask;
     }

-    std::conditional_t<true, std::condition_variable_any, std::condition_variable> cv;
-    std::mutex cv_mutex;
-    size_t mask_;
-    Slot<T>* slots_;
-    [[no_unique_address]] Allocator allocator_;
+    static constexpr size_t mask = capacity - 1;

     // Align to avoid false sharing between head_ and tail_
     alignas(hardware_interference_size) std::atomic<size_t> head_{0};
     alignas(hardware_interference_size) std::atomic<size_t> tail_{0};

+    std::mutex cv_mutex;
+    std::condition_variable_any cv;
+
+    Slot<T>* slots;
+    [[no_unique_address]] std::allocator<Slot<T>> allocator;
+
     static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
                   "T must be nothrow copy or move assignable");

     static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
 };
-} // namespace mpsc
-
-template <typename T, typename Allocator = mpsc::AlignedAllocator<mpsc::Slot<T>>>
-using MPSCQueue = mpsc::Queue<T, Allocator>;
-
-} // namespace Common

 #ifdef _MSC_VER
 #pragma warning(pop)
 #endif
+
+} // namespace Common
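The queue's capacity moves from a constructor argument to a compile-time template parameter (defaulting to 0x400), so an invalid capacity now fails with static_assert instead of a runtime throw. A standalone sketch of that design point (yuzu's type simplified to a bare template):

```cpp
#include <bit>
#include <cstddef>

// Minimal sketch: capacity is a template parameter, so a bad capacity fails
// at compile time instead of throwing std::invalid_argument at run time.
template <typename T, std::size_t capacity = 0x400>
class FixedQueue {
    static_assert(std::has_single_bit(capacity), "capacity must be a power of 2");
    static constexpr std::size_t mask = capacity - 1;

public:
    constexpr std::size_t index(std::size_t i) const {
        return i & mask; // cheap wrap-around instead of i % capacity
    }
};

int main() {
    FixedQueue<int> q;            // OK: 0x400 is a power of two
    // FixedQueue<int, 1000> bad; // would fail to compile
    return static_cast<int>(q.index(0x401)); // wraps to 1
}
```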
@@ -33,9 +33,9 @@ void DetachedTasks::AddTask(std::function<void()> task) {
     ++instance->count;
     std::thread([task{std::move(task)}]() {
         task();
-        std::unique_lock lock{instance->mutex};
+        std::unique_lock thread_lock{instance->mutex};
         --instance->count;
-        std::notify_all_at_thread_exit(instance->cv, std::move(lock));
+        std::notify_all_at_thread_exit(instance->cv, std::move(thread_lock));
     }).detach();
 }
@@ -15,6 +15,9 @@ enum class PageType : u8 {
     Unmapped,
     /// Page is mapped to regular memory. This is the only type you can get pointers to.
     Memory,
+    /// Page is mapped to regular memory, but inaccessible from CPU fastmem and must use
+    /// the callbacks.
+    DebugMemory,
     /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
     /// invalidation
     RasterizerCachedMemory,
@@ -76,7 +76,7 @@ std::string ParamPackage::Serialize() const {
 std::string ParamPackage::Get(const std::string& key, const std::string& default_value) const {
     auto pair = data.find(key);
     if (pair == data.end()) {
-        LOG_DEBUG(Common, "key '{}' not found", key);
+        LOG_TRACE(Common, "key '{}' not found", key);
         return default_value;
     }

@@ -86,7 +86,7 @@ std::string ParamPackage::Get(const std::string& key, const std::string& default
 int ParamPackage::Get(const std::string& key, int default_value) const {
     auto pair = data.find(key);
     if (pair == data.end()) {
-        LOG_DEBUG(Common, "key '{}' not found", key);
+        LOG_TRACE(Common, "key '{}' not found", key);
         return default_value;
     }

@@ -101,7 +101,7 @@ int ParamPackage::Get(const std::string& key, int default_value) const {
 float ParamPackage::Get(const std::string& key, float default_value) const {
     auto pair = data.find(key);
     if (pair == data.end()) {
-        LOG_DEBUG(Common, "key {} not found", key);
+        LOG_TRACE(Common, "key {} not found", key);
         return default_value;
     }
@@ -147,7 +147,7 @@ void UpdateRescalingInfo() {
         info.down_shift = 0;
         break;
     default:
-        UNREACHABLE();
+        ASSERT(false);
         info.up_scale = 1;
         info.down_shift = 0;
     }
@@ -47,6 +47,9 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
     case ThreadPriority::VeryHigh:
         windows_priority = THREAD_PRIORITY_HIGHEST;
         break;
+    case ThreadPriority::Critical:
+        windows_priority = THREAD_PRIORITY_TIME_CRITICAL;
+        break;
     default:
         windows_priority = THREAD_PRIORITY_NORMAL;
         break;

@@ -59,9 +62,10 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
 void SetCurrentThreadPriority(ThreadPriority new_priority) {
     pthread_t this_thread = pthread_self();

-    s32 max_prio = sched_get_priority_max(SCHED_OTHER);
-    s32 min_prio = sched_get_priority_min(SCHED_OTHER);
-    u32 level = static_cast<u32>(new_priority) + 1;
+    const auto scheduling_type = SCHED_OTHER;
+    s32 max_prio = sched_get_priority_max(scheduling_type);
+    s32 min_prio = sched_get_priority_min(scheduling_type);
+    u32 level = std::max(static_cast<u32>(new_priority) + 1, 4U);

     struct sched_param params;
     if (max_prio > min_prio) {

@@ -70,7 +74,7 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
         params.sched_priority = min_prio - ((min_prio - max_prio) * level) / 4;
     }

-    pthread_setschedparam(this_thread, SCHED_OTHER, &params);
+    pthread_setschedparam(this_thread, scheduling_type, &params);
 }

 #endif
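For reference, the POSIX branch interpolates a priority level across the scheduler's reported range. A standalone sketch of that arithmetic (both range orientations handled; the concrete values are assumptions):

```cpp
#include <cstdio>

// Level 1..4 is mapped linearly across [min_prio, max_prio], mirroring the
// interpolation in SetCurrentThreadPriority above.
int MapPriority(int min_prio, int max_prio, unsigned level) {
    if (max_prio > min_prio) {
        return min_prio + ((max_prio - min_prio) * static_cast<int>(level)) / 4;
    }
    return min_prio - ((min_prio - max_prio) * static_cast<int>(level)) / 4;
}

int main() {
    std::printf("%d\n", MapPriority(0, 99, 2)); // 49: mid level lands mid range
    std::printf("%d\n", MapPriority(0, 99, 4)); // 99: highest level hits max_prio
}
```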
@@ -92,6 +92,7 @@ enum class ThreadPriority : u32 {
     Normal = 1,
     High = 2,
     VeryHigh = 3,
+    Critical = 4,
 };

 void SetCurrentThreadPriority(ThreadPriority new_priority);
@@ -30,6 +30,10 @@ namespace Common {
 #else
     return _udiv128(r[1], r[0], d, &remainder);
 #endif
 #else
+#ifdef __SIZEOF_INT128__
+    const auto product = static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b);
+    return static_cast<u64>(product / d);
+#else
     const u64 diva = a / d;
     const u64 moda = a % d;

@@ -37,6 +41,7 @@ namespace Common {
     const u64 divb = b / d;
     const u64 modb = b % d;
     return diva * b + moda * divb + moda * modb / d;
 #endif
+#endif
 }

 // This function multiplies 2 u64 values and produces a u128 value;
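The new branch computes (a * b) / d exactly in 128-bit arithmetic whenever the compiler provides __int128, instead of the split-and-recombine approximation. A standalone sketch of the same idea (function name mirrors the helper above; treat it as illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// Computes (a * b) / d without overflowing the 64-bit product, using the
// compiler-provided 128-bit integer on GCC/Clang; otherwise falls back to
// the split-and-recombine form kept in the diff above.
uint64_t MultiplyAndDivide64(uint64_t a, uint64_t b, uint64_t d) {
#ifdef __SIZEOF_INT128__
    const auto product = static_cast<unsigned __int128>(a) * b;
    return static_cast<uint64_t>(product / d);
#else
    const uint64_t diva = a / d;
    const uint64_t moda = a % d;
    const uint64_t divb = b / d;
    const uint64_t modb = b % d;
    return diva * b + moda * divb + moda * modb / d;
#endif
}

int main() {
    // (2^40 * 3) / 2: the intermediate product needs more than 64 bits for
    // larger inputs, which is exactly what the __int128 path protects against.
    std::printf("%llu\n", static_cast<unsigned long long>(
                              MultiplyAndDivide64(1ULL << 40, 3, 2)));
}
```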
@@ -75,8 +75,8 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
 }

 u64 NativeClock::GetRTSC() {
-    TimePoint new_time_point{};
     TimePoint current_time_point{};
+    TimePoint new_time_point{};

     current_time_point.pack = Common::AtomicLoad128(time_point.pack.data());
     do {

@@ -89,8 +89,7 @@ u64 NativeClock::GetRTSC() {
         new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff;
     } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
                                            current_time_point.pack, current_time_point.pack));
-    /// The clock cannot be more precise than the guest timer, remove the lower bits
-    return new_time_point.inner.accumulated_ticks & inaccuracy_mask;
+    return new_time_point.inner.accumulated_ticks;
 }

 void NativeClock::Pause(bool is_paused) {

@@ -37,12 +37,8 @@ private:
     } inner;
 };

-    /// value used to reduce the native clocks accuracy as some apss rely on
-    /// undefined behavior where the level of accuracy in the clock shouldn't
-    /// be higher.
-    static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1);
-
     TimePoint time_point;

     // factors
     u64 clock_rtsc_factor{};
     u64 cpu_rtsc_factor{};
@@ -222,7 +222,7 @@ add_library(core STATIC
     hle/kernel/k_page_buffer.h
     hle/kernel/k_page_heap.cpp
     hle/kernel/k_page_heap.h
-    hle/kernel/k_page_linked_list.h
+    hle/kernel/k_page_group.h
     hle/kernel/k_page_table.cpp
     hle/kernel/k_page_table.h
     hle/kernel/k_port.cpp

@@ -743,16 +743,11 @@ if (MSVC)
         /we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
         /we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
         /we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
-        /we4456 # Declaration of 'identifier' hides previous local declaration
-        /we4457 # Declaration of 'identifier' hides function parameter
-        /we4458 # Declaration of 'identifier' hides class member
-        /we4459 # Declaration of 'identifier' hides global declaration
     )
 else()
     target_compile_options(core PRIVATE
         -Werror=conversion
         -Werror=ignored-qualifiers
-        -Werror=shadow

         $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
         $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
@@ -95,7 +95,7 @@ void ARM_Interface::Run() {
     using Kernel::SuspendType;

     while (true) {
-        Kernel::KThread* current_thread{system.Kernel().CurrentScheduler()->GetCurrentThread()};
+        Kernel::KThread* current_thread{Kernel::GetCurrentThreadPointer(system.Kernel())};
         Dynarmic::HaltReason hr{};

         // Notify the debugger and go to sleep if a step was performed

@@ -107,6 +107,7 @@ void ARM_Interface::Run() {
         }

         // Otherwise, run the thread.
+        system.EnterDynarmicProfile();
         if (current_thread->GetStepState() == StepState::StepPending) {
             hr = StepJit();

@@ -116,14 +117,29 @@ void ARM_Interface::Run() {
         } else {
             hr = RunJit();
         }
+        system.ExitDynarmicProfile();

-        // Notify the debugger and go to sleep if a breakpoint was hit.
-        if (Has(hr, breakpoint)) {
-            system.GetDebugger().NotifyThreadStopped(current_thread);
-            current_thread->RequestSuspend(SuspendType::Debug);
-            break;
-        }
+        // Notify the debugger and go to sleep if a breakpoint was hit,
+        // or if the thread is unable to continue for any reason.
+        if (Has(hr, breakpoint) || Has(hr, no_execute)) {
+            RewindBreakpointInstruction();
+            if (system.DebuggerEnabled()) {
+                system.GetDebugger().NotifyThreadStopped(current_thread);
+            }
+            current_thread->RequestSuspend(Kernel::SuspendType::Debug);
+            break;
+        }
+
+        // Notify the debugger and go to sleep if a watchpoint was hit.
+        if (Has(hr, watchpoint)) {
+            RewindBreakpointInstruction();
+            if (system.DebuggerEnabled()) {
+                system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint());
+            }
+            current_thread->RequestSuspend(SuspendType::Debug);
+            break;
+        }

         // Handle syscalls and scheduling (this may change the current thread)
         if (Has(hr, svc_call)) {
             Kernel::Svc::Call(system, GetSvcNumber());

@@ -134,4 +150,36 @@ void ARM_Interface::Run() {
     }
 }

+void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) {
+    watchpoints = &wp;
+}
+
+const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
+    VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const {
+    if (!watchpoints) {
+        return nullptr;
+    }
+
+    const VAddr start_address{addr};
+    const VAddr end_address{addr + size};
+
+    for (size_t i = 0; i < Core::Hardware::NUM_WATCHPOINTS; i++) {
+        const auto& watch{(*watchpoints)[i]};
+
+        if (end_address <= watch.start_address) {
+            continue;
+        }
+        if (start_address >= watch.end_address) {
+            continue;
+        }
+        if ((access_type & watch.type) == Kernel::DebugWatchpointType::None) {
+            continue;
+        }
+
+        return &watch;
+    }
+
+    return nullptr;
+}
+
 } // namespace Core
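MatchingWatchpoint is a linear scan using the standard half-open interval-overlap test: an access [addr, addr + size) hits a watchpoint unless it ends before the watch begins or starts at or after its end. A standalone sketch of that predicate:

```cpp
#include <cstdint>
#include <cstdio>

// Half-open interval overlap, as used by MatchingWatchpoint above.
bool Overlaps(uint64_t a_begin, uint64_t a_end, uint64_t b_begin, uint64_t b_end) {
    // No overlap iff one range ends before the other starts.
    return !(a_end <= b_begin || a_begin >= b_end);
}

int main() {
    // A 4-byte access at 0x1000 against a watchpoint covering [0x1002, 0x1003).
    std::printf("%d\n", Overlaps(0x1000, 0x1004, 0x1002, 0x1003)); // 1 (hit)
    std::printf("%d\n", Overlaps(0x1004, 0x1008, 0x1002, 0x1003)); // 0 (miss)
}
```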
@@ -5,6 +5,7 @@
 #pragma once

+#include <array>
 #include <span>
 #include <vector>

 #include <dynarmic/interface/halt_reason.h>

@@ -19,13 +20,16 @@ struct PageTable;

 namespace Kernel {
 enum class VMAPermission : u8;
-}
+enum class DebugWatchpointType : u8;
+struct DebugWatchpoint;
+} // namespace Kernel

 namespace Core {
 class System;
 class CPUInterruptHandler;

 using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>;
+using WatchpointArray = std::array<Kernel::DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>;

 /// Generic ARMv8 CPU interface
 class ARM_Interface {

@@ -170,6 +174,7 @@ public:
     virtual void SaveContext(ThreadContext64& ctx) = 0;
     virtual void LoadContext(const ThreadContext32& ctx) = 0;
     virtual void LoadContext(const ThreadContext64& ctx) = 0;
+    void LoadWatchpointArray(const WatchpointArray& wp);

     /// Clears the exclusive monitor's state.
     virtual void ClearExclusiveState() = 0;

@@ -198,18 +203,25 @@ public:
     static constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
     static constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
     static constexpr Dynarmic::HaltReason breakpoint = Dynarmic::HaltReason::UserDefined4;
+    static constexpr Dynarmic::HaltReason watchpoint = Dynarmic::HaltReason::UserDefined5;
+    static constexpr Dynarmic::HaltReason no_execute = Dynarmic::HaltReason::UserDefined6;

 protected:
     /// System context that this ARM interface is running under.
     System& system;
     CPUInterrupts& interrupt_handlers;
+    const WatchpointArray* watchpoints;
     bool uses_wall_clock;

     static void SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out);
+    const Kernel::DebugWatchpoint* MatchingWatchpoint(
+        VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const;

     virtual Dynarmic::HaltReason RunJit() = 0;
     virtual Dynarmic::HaltReason StepJit() = 0;
     virtual u32 GetSvcNumber() const = 0;
+    virtual const Kernel::DebugWatchpoint* HaltedWatchpoint() const = 0;
+    virtual void RewindBreakpointInstruction() = 0;
 };

 } // namespace Core
@@ -29,64 +29,94 @@ using namespace Common::Literals;
 class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
 public:
     explicit DynarmicCallbacks32(ARM_Dynarmic_32& parent_)
-        : parent{parent_}, memory(parent.system.Memory()) {}
+        : parent{parent_},
+          memory(parent.system.Memory()), debugger_enabled{parent.system.DebuggerEnabled()} {}

     u8 MemoryRead8(u32 vaddr) override {
+        CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
         return memory.Read8(vaddr);
     }
     u16 MemoryRead16(u32 vaddr) override {
+        CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
         return memory.Read16(vaddr);
     }
     u32 MemoryRead32(u32 vaddr) override {
+        CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
         return memory.Read32(vaddr);
     }
     u64 MemoryRead64(u32 vaddr) override {
+        CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
         return memory.Read64(vaddr);
     }
     std::optional<u32> MemoryReadCode(u32 vaddr) override {
+        if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
+            return std::nullopt;
+        }
         return MemoryRead32(vaddr);
     }

     void MemoryWrite8(u32 vaddr, u8 value) override {
-        memory.Write8(vaddr, value);
+        if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
+            memory.Write8(vaddr, value);
+        }
     }
     void MemoryWrite16(u32 vaddr, u16 value) override {
-        memory.Write16(vaddr, value);
+        if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
+            memory.Write16(vaddr, value);
+        }
     }
     void MemoryWrite32(u32 vaddr, u32 value) override {
-        memory.Write32(vaddr, value);
+        if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
+            memory.Write32(vaddr, value);
+        }
     }
     void MemoryWrite64(u32 vaddr, u64 value) override {
-        memory.Write64(vaddr, value);
+        if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
+            memory.Write64(vaddr, value);
+        }
     }

     bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override {
-        return memory.WriteExclusive8(vaddr, value, expected);
+        return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
+               memory.WriteExclusive8(vaddr, value, expected);
     }
     bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override {
-        return memory.WriteExclusive16(vaddr, value, expected);
+        return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
+               memory.WriteExclusive16(vaddr, value, expected);
     }
     bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override {
-        return memory.WriteExclusive32(vaddr, value, expected);
+        return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
+               memory.WriteExclusive32(vaddr, value, expected);
     }
     bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override {
-        return memory.WriteExclusive64(vaddr, value, expected);
+        return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
+               memory.WriteExclusive64(vaddr, value, expected);
     }

     void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
         parent.LogBacktrace();
-        UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc,
-                          MemoryReadCode(pc));
+        LOG_ERROR(Core_ARM,
+                  "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
+                  num_instructions, MemoryRead32(pc));
     }

     void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
-        if (parent.system.DebuggerEnabled()) {
-            parent.jit.load()->Regs()[15] = pc;
-            parent.jit.load()->HaltExecution(ARM_Interface::breakpoint);
-            return;
-        }
-
-        parent.LogBacktrace();
-        LOG_CRITICAL(Core_ARM,
-                     "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
-                     exception, pc, MemoryReadCode(pc), parent.IsInThumbMode());
+        switch (exception) {
+        case Dynarmic::A32::Exception::NoExecuteFault:
+            LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#08x}", pc);
+            ReturnException(pc, ARM_Interface::no_execute);
+            return;
+        default:
+            if (debugger_enabled) {
+                ReturnException(pc, ARM_Interface::breakpoint);
+                return;
+            }
+
+            parent.LogBacktrace();
+            LOG_CRITICAL(Core_ARM,
+                         "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
+                         exception, pc, MemoryRead32(pc), parent.IsInThumbMode());
+        }
     }

     void CallSVC(u32 swi) override {

@@ -117,9 +147,31 @@
         return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
     }

+    bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
+        if (!debugger_enabled) {
+            return true;
+        }
+
+        const auto match{parent.MatchingWatchpoint(addr, size, type)};
+        if (match) {
+            parent.halted_watchpoint = match;
+            ReturnException(parent.jit.load()->Regs()[15], ARM_Interface::watchpoint);
+            return false;
+        }
+
+        return true;
+    }
+
+    void ReturnException(u32 pc, Dynarmic::HaltReason hr) {
+        parent.SaveContext(parent.breakpoint_context);
+        parent.breakpoint_context.cpu_registers[15] = pc;
+        parent.jit.load()->HaltExecution(hr);
+    }
+
     ARM_Dynarmic_32& parent;
     Core::Memory::Memory& memory;
     std::size_t num_interpreted_instructions{};
+    bool debugger_enabled{};
     static constexpr u64 minimum_run_cycles = 1000U;
 };

@@ -154,6 +206,11 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
     config.code_cache_size = 512_MiB;
     config.far_code_offset = 400_MiB;

+    // Allow memory fault handling to work
+    if (system.DebuggerEnabled()) {
+        config.check_halt_on_memory_access = true;
+    }
+
     // null_jit
     if (!page_table) {
         // Don't waste too much memory on null_jit

@@ -248,6 +305,14 @@ u32 ARM_Dynarmic_32::GetSvcNumber() const {
     return svc_swi;
 }

+const Kernel::DebugWatchpoint* ARM_Dynarmic_32::HaltedWatchpoint() const {
+    return halted_watchpoint;
+}
+
+void ARM_Dynarmic_32::RewindBreakpointInstruction() {
+    LoadContext(breakpoint_context);
+}
+
 ARM_Dynarmic_32::ARM_Dynarmic_32(System& system_, CPUInterrupts& interrupt_handlers_,
                                  bool uses_wall_clock_, ExclusiveMonitor& exclusive_monitor_,
                                  std::size_t core_index_)
@@ -72,6 +72,8 @@ protected:
|
||||
Dynarmic::HaltReason RunJit() override;
|
||||
Dynarmic::HaltReason StepJit() override;
|
||||
u32 GetSvcNumber() const override;
|
||||
const Kernel::DebugWatchpoint* HaltedWatchpoint() const override;
|
||||
void RewindBreakpointInstruction() override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable* page_table) const;
|
||||
@@ -98,6 +100,10 @@ private:
|
||||
|
||||
// SVC callback
|
||||
u32 svc_swi{};
|
||||
|
||||
// Watchpoint info
|
||||
const Kernel::DebugWatchpoint* halted_watchpoint;
|
||||
ThreadContext32 breakpoint_context;
|
||||
};
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -29,62 +29,89 @@ using namespace Common::Literals;
|
||||
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
|
||||
public:
|
||||
explicit DynarmicCallbacks64(ARM_Dynarmic_64& parent_)
|
||||
: parent{parent_}, memory(parent.system.Memory()) {}
|
||||
: parent{parent_},
|
||||
memory(parent.system.Memory()), debugger_enabled{parent.system.DebuggerEnabled()} {}
|
||||
|
||||
u8 MemoryRead8(u64 vaddr) override {
|
||||
CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
|
||||
return memory.Read8(vaddr);
|
||||
}
|
||||
u16 MemoryRead16(u64 vaddr) override {
|
||||
CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
|
||||
return memory.Read16(vaddr);
|
||||
}
|
||||
u32 MemoryRead32(u64 vaddr) override {
|
||||
CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
|
||||
return memory.Read32(vaddr);
|
||||
}
|
||||
u64 MemoryRead64(u64 vaddr) override {
|
||||
CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
|
||||
return memory.Read64(vaddr);
|
||||
}
|
||||
Vector MemoryRead128(u64 vaddr) override {
|
||||
CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read);
|
||||
return {memory.Read64(vaddr), memory.Read64(vaddr + 8)};
|
||||
}
|
||||
std::optional<u32> MemoryReadCode(u64 vaddr) override {
|
||||
if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return MemoryRead32(vaddr);
|
||||
}
|
||||
|
||||
void MemoryWrite8(u64 vaddr, u8 value) override {
|
||||
memory.Write8(vaddr, value);
|
||||
if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
|
||||
memory.Write8(vaddr, value);
|
||||
}
|
||||
}
|
||||
void MemoryWrite16(u64 vaddr, u16 value) override {
|
||||
memory.Write16(vaddr, value);
|
||||
if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
|
||||
memory.Write16(vaddr, value);
|
||||
}
|
||||
}
|
||||
void MemoryWrite32(u64 vaddr, u32 value) override {
|
||||
memory.Write32(vaddr, value);
|
||||
if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
|
||||
memory.Write32(vaddr, value);
|
||||
}
|
||||
}
|
||||
void MemoryWrite64(u64 vaddr, u64 value) override {
|
||||
memory.Write64(vaddr, value);
|
||||
if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
|
||||
memory.Write64(vaddr, value);
|
||||
}
|
||||
}
|
||||
void MemoryWrite128(u64 vaddr, Vector value) override {
|
||||
memory.Write64(vaddr, value[0]);
|
||||
memory.Write64(vaddr + 8, value[1]);
|
||||
if (CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write)) {
|
||||
memory.Write64(vaddr, value[0]);
|
||||
memory.Write64(vaddr + 8, value[1]);
|
||||
}
|
||||
}
|
||||
|
||||
bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override {
|
||||
return memory.WriteExclusive8(vaddr, value, expected);
|
||||
return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
|
||||
memory.WriteExclusive8(vaddr, value, expected);
|
||||
}
|
||||
bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override {
|
||||
return memory.WriteExclusive16(vaddr, value, expected);
|
||||
return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
|
||||
memory.WriteExclusive16(vaddr, value, expected);
|
||||
}
|
||||
bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override {
|
||||
return memory.WriteExclusive32(vaddr, value, expected);
|
||||
return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
|
||||
memory.WriteExclusive32(vaddr, value, expected);
|
||||
}
|
||||
bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override {
|
||||
return memory.WriteExclusive64(vaddr, value, expected);
|
||||
return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
|
||||
memory.WriteExclusive64(vaddr, value, expected);
|
||||
}
|
||||
bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override {
|
||||
return memory.WriteExclusive128(vaddr, value, expected);
|
||||
return CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write) &&
|
||||
memory.WriteExclusive128(vaddr, value, expected);
|
||||
}
|
||||
|
||||
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
|
||||
parent.LogBacktrace();
|
||||
LOG_ERROR(Core_ARM,
|
||||
"Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
|
||||
num_instructions, MemoryReadCode(pc));
|
||||
num_instructions, MemoryRead32(pc));
|
||||
}
|
||||
|
||||
void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
|
||||
@@ -117,16 +144,19 @@ public:
|
||||
case Dynarmic::A64::Exception::SendEventLocal:
|
||||
case Dynarmic::A64::Exception::Yield:
|
||||
return;
|
||||
case Dynarmic::A64::Exception::NoExecuteFault:
|
||||
LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#016x}", pc);
|
||||
ReturnException(pc, ARM_Interface::no_execute);
|
||||
return;
|
||||
default:
|
||||
if (parent.system.DebuggerEnabled()) {
|
||||
parent.jit.load()->SetPC(pc);
|
||||
parent.jit.load()->HaltExecution(ARM_Interface::breakpoint);
|
||||
if (debugger_enabled) {
|
||||
ReturnException(pc, ARM_Interface::breakpoint);
|
||||
return;
|
||||
}
|
||||
|
||||
parent.LogBacktrace();
|
||||
ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
|
||||
static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
|
||||
LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
|
||||
static_cast<std::size_t>(exception), pc, MemoryRead32(pc));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -160,10 +190,32 @@ public:
|
||||
return parent.system.CoreTiming().GetClockTicks();
|
||||
}
|
||||
|
||||
bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
|
||||
if (!debugger_enabled) {
|
||||
return true;
|
||||
}
|
||||
|
||||
const auto match{parent.MatchingWatchpoint(addr, size, type)};
|
||||
if (match) {
|
||||
parent.halted_watchpoint = match;
|
||||
ReturnException(parent.jit.load()->GetPC(), ARM_Interface::watchpoint);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void ReturnException(u64 pc, Dynarmic::HaltReason hr) {
|
||||
parent.SaveContext(parent.breakpoint_context);
|
||||
parent.breakpoint_context.pc = pc;
|
||||
parent.jit.load()->HaltExecution(hr);
|
||||
}
|
||||
|
||||
ARM_Dynarmic_64& parent;
|
||||
Core::Memory::Memory& memory;
|
||||
u64 tpidrro_el0 = 0;
|
||||
u64 tpidr_el0 = 0;
|
||||
bool debugger_enabled{};
|
||||
static constexpr u64 minimum_run_cycles = 1000U;
|
||||
};
|
||||
|
||||
@@ -214,6 +266,11 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
|
||||
config.code_cache_size = 512_MiB;
|
||||
config.far_code_offset = 400_MiB;
|
||||
|
||||
// Allow memory fault handling to work
|
||||
if (system.DebuggerEnabled()) {
|
||||
config.check_halt_on_memory_access = true;
|
||||
}
|
||||
|
||||
// null_jit
|
||||
if (!page_table) {
|
||||
// Don't waste too much memory on null_jit
|
||||
@@ -308,6 +365,14 @@ u32 ARM_Dynarmic_64::GetSvcNumber() const {
|
||||
return svc_swi;
|
||||
}
|
||||
|
||||
const Kernel::DebugWatchpoint* ARM_Dynarmic_64::HaltedWatchpoint() const {
|
||||
return halted_watchpoint;
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::RewindBreakpointInstruction() {
|
||||
LoadContext(breakpoint_context);
|
||||
}
|
||||
|
||||
ARM_Dynarmic_64::ARM_Dynarmic_64(System& system_, CPUInterrupts& interrupt_handlers_,
|
||||
bool uses_wall_clock_, ExclusiveMonitor& exclusive_monitor_,
|
||||
std::size_t core_index_)
|
||||
|
||||
@@ -66,6 +66,8 @@ protected:
|
||||
Dynarmic::HaltReason RunJit() override;
|
||||
Dynarmic::HaltReason StepJit() override;
|
||||
u32 GetSvcNumber() const override;
|
||||
const Kernel::DebugWatchpoint* HaltedWatchpoint() const override;
|
||||
void RewindBreakpointInstruction() override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable* page_table,
|
||||
@@ -91,6 +93,10 @@ private:
|
||||
|
||||
// SVC callback
|
||||
u32 svc_swi{};
|
||||
|
||||
// Breakpoint info
|
||||
const Kernel::DebugWatchpoint* halted_watchpoint;
|
||||
ThreadContext64 breakpoint_context;
|
||||
};
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -138,7 +138,6 @@ struct System::Impl {
|
||||
|
||||
kernel.Suspend(false);
|
||||
core_timing.SyncPause(false);
|
||||
cpu_manager.Pause(false);
|
||||
is_paused = false;
|
||||
|
||||
return status;
|
||||
@@ -150,25 +149,22 @@ struct System::Impl {
|
||||
|
||||
core_timing.SyncPause(true);
|
||||
kernel.Suspend(true);
|
||||
cpu_manager.Pause(true);
|
||||
is_paused = true;
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
std::unique_lock<std::mutex> StallCPU() {
|
||||
std::unique_lock<std::mutex> StallProcesses() {
|
||||
std::unique_lock<std::mutex> lk(suspend_guard);
|
||||
kernel.Suspend(true);
|
||||
core_timing.SyncPause(true);
|
||||
cpu_manager.Pause(true);
|
||||
return lk;
|
||||
}
|
||||
|
||||
void UnstallCPU() {
|
||||
void UnstallProcesses() {
|
||||
if (!is_paused) {
|
||||
core_timing.SyncPause(false);
|
||||
kernel.Suspend(false);
|
||||
cpu_manager.Pause(false);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -334,6 +330,8 @@ struct System::Impl {
|
||||
gpu_core->NotifyShutdown();
|
||||
}
|
||||
|
||||
kernel.ShutdownCores();
|
||||
cpu_manager.Shutdown();
|
||||
debugger.reset();
|
||||
services.reset();
|
||||
service_manager.reset();
|
||||
@@ -493,12 +491,18 @@ void System::Shutdown() {
|
||||
impl->Shutdown();
|
||||
}
|
||||
|
||||
std::unique_lock<std::mutex> System::StallCPU() {
|
||||
return impl->StallCPU();
|
||||
void System::DetachDebugger() {
|
||||
if (impl->debugger) {
|
||||
impl->debugger->NotifyShutdown();
|
||||
}
|
||||
}
|
||||
|
||||
void System::UnstallCPU() {
|
||||
impl->UnstallCPU();
|
||||
std::unique_lock<std::mutex> System::StallProcesses() {
|
||||
return impl->StallProcesses();
|
||||
}
|
||||
|
||||
void System::UnstallProcesses() {
|
||||
impl->UnstallProcesses();
|
||||
}
|
||||
|
||||
void System::InitializeDebugger() {
|
||||
|
||||
@@ -160,8 +160,11 @@ public:
|
||||
/// Shutdown the emulated system.
|
||||
void Shutdown();
|
||||
|
||||
std::unique_lock<std::mutex> StallCPU();
|
||||
void UnstallCPU();
|
||||
/// Forcibly detach the debugger if it is running.
|
||||
void DetachDebugger();
|
||||
|
||||
std::unique_lock<std::mutex> StallProcesses();
|
||||
void UnstallProcesses();
|
||||
|
||||
/**
|
||||
* Initialize the debugger.
|
||||
|
||||
@@ -6,7 +6,9 @@
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/thread.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/core_timing_util.h"
|
||||
#include "core/hardware_properties.h"
|
||||
@@ -41,11 +43,11 @@ CoreTiming::CoreTiming()
|
||||
|
||||
CoreTiming::~CoreTiming() = default;
|
||||
|
||||
void CoreTiming::ThreadEntry(CoreTiming& instance) {
|
||||
constexpr char name[] = "yuzu:HostTiming";
|
||||
MicroProfileOnThreadCreate(name);
|
||||
Common::SetCurrentThreadName(name);
|
||||
Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
|
||||
void CoreTiming::ThreadEntry(CoreTiming& instance, size_t id) {
|
||||
const std::string name = "yuzu:HostTiming_" + std::to_string(id);
|
||||
MicroProfileOnThreadCreate(name.c_str());
|
||||
Common::SetCurrentThreadName(name.c_str());
|
||||
Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
|
||||
instance.on_thread_init();
|
||||
instance.ThreadLoop();
|
||||
MicroProfileOnThreadExit();
|
||||
@@ -59,68 +61,92 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
|
||||
const auto empty_timed_callback = [](std::uintptr_t, std::chrono::nanoseconds) {};
|
||||
ev_lost = CreateEvent("_lost_event", empty_timed_callback);
|
||||
if (is_multicore) {
|
||||
timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
|
||||
worker_threads.emplace_back(ThreadEntry, std::ref(*this), 0);
|
||||
}
|
||||
}
|
||||
|
||||
void CoreTiming::Shutdown() {
|
||||
paused = true;
|
||||
is_paused = true;
|
||||
shutting_down = true;
|
||||
pause_event.Set();
|
||||
event.Set();
|
||||
if (timer_thread) {
|
||||
timer_thread->join();
|
||||
std::atomic_thread_fence(std::memory_order_release);
|
||||
|
||||
event_cv.notify_all();
|
||||
wait_pause_cv.notify_all();
|
||||
for (auto& thread : worker_threads) {
|
||||
thread.join();
|
||||
}
|
||||
worker_threads.clear();
|
||||
ClearPendingEvents();
|
||||
timer_thread.reset();
|
||||
has_started = false;
|
||||
}
|
||||
|
||||
void CoreTiming::Pause(bool is_paused) {
|
||||
paused = is_paused;
|
||||
pause_event.Set();
|
||||
}
|
||||
|
||||
void CoreTiming::SyncPause(bool is_paused) {
|
||||
if (is_paused == paused && paused_set == paused) {
|
||||
void CoreTiming::Pause(bool is_paused_) {
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
if (is_paused_ == paused_state.load(std::memory_order_relaxed)) {
|
||||
return;
|
||||
}
|
||||
Pause(is_paused);
|
||||
if (timer_thread) {
|
||||
if (!is_paused) {
|
||||
pause_event.Set();
|
||||
if (is_multicore) {
|
||||
is_paused = is_paused_;
|
||||
event_cv.notify_all();
|
||||
if (!is_paused_) {
|
||||
wait_pause_cv.notify_all();
|
||||
}
|
||||
}
|
||||
paused_state.store(is_paused_, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void CoreTiming::SyncPause(bool is_paused_) {
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
if (is_paused_ == paused_state.load(std::memory_order_relaxed)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (is_multicore) {
|
||||
is_paused = is_paused_;
|
||||
event_cv.notify_all();
|
||||
if (!is_paused_) {
|
||||
wait_pause_cv.notify_all();
|
||||
}
|
||||
}
|
||||
paused_state.store(is_paused_, std::memory_order_relaxed);
|
||||
if (is_multicore) {
|
||||
if (is_paused_) {
|
||||
wait_signal_cv.wait(main_lock, [this] { return pause_count == worker_threads.size(); });
|
||||
} else {
|
||||
wait_signal_cv.wait(main_lock, [this] { return pause_count == 0; });
|
||||
}
|
||||
event.Set();
|
||||
while (paused_set != is_paused)
|
||||
;
|
||||
}
|
||||
}
|
||||
|
||||
bool CoreTiming::IsRunning() const {
|
||||
return !paused_set;
|
||||
return !paused_state.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
bool CoreTiming::HasPendingEvents() const {
|
||||
return !(wait_set && event_queue.empty());
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
return !event_queue.empty() || pending_events.load(std::memory_order_relaxed) != 0;
|
||||
}
|
||||
|
||||
void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
|
||||
const std::shared_ptr<EventType>& event_type,
|
||||
std::uintptr_t user_data) {
|
||||
{
|
||||
std::scoped_lock scope{basic_lock};
|
||||
const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());
|
||||
|
||||
event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());
|
||||
|
||||
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
|
||||
pending_events.fetch_add(1, std::memory_order_relaxed);
|
||||
|
||||
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
|
||||
if (is_multicore) {
|
||||
event_cv.notify_one();
|
||||
}
|
||||
event.Set();
|
||||
}
|
||||
|
||||
void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
|
||||
std::uintptr_t user_data) {
|
||||
std::scoped_lock scope{basic_lock};
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
|
||||
return e.type.lock().get() == event_type.get() && e.user_data == user_data;
|
||||
});
|
||||
@@ -129,6 +155,7 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
|
||||
if (itr != event_queue.end()) {
|
||||
event_queue.erase(itr, event_queue.end());
|
||||
std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
pending_events.fetch_sub(1, std::memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -168,11 +195,12 @@ u64 CoreTiming::GetClockTicks() const {
|
||||
}
|
||||
|
||||
void CoreTiming::ClearPendingEvents() {
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
event_queue.clear();
|
||||
}
|
||||
|
||||
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
|
||||
std::scoped_lock lock{basic_lock};
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
|
||||
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
|
||||
return e.type.lock().get() == event_type.get();
|
||||
@@ -186,21 +214,25 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
|
||||
}
|
||||
|
||||
std::optional<s64> CoreTiming::Advance() {
|
||||
std::scoped_lock lock{advance_lock, basic_lock};
|
||||
global_timer = GetGlobalTimeNs().count();
|
||||
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
while (!event_queue.empty() && event_queue.front().time <= global_timer) {
|
||||
Event evt = std::move(event_queue.front());
|
||||
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
|
||||
event_queue.pop_back();
|
||||
basic_lock.unlock();
|
||||
|
||||
if (const auto event_type{evt.type.lock()}) {
|
||||
event_type->callback(
|
||||
evt.user_data, std::chrono::nanoseconds{static_cast<s64>(global_timer - evt.time)});
|
||||
|
||||
event_mutex.unlock();
|
||||
|
||||
const s64 delay = static_cast<s64>(GetGlobalTimeNs().count() - evt.time);
|
||||
event_type->callback(evt.user_data, std::chrono::nanoseconds{delay});
|
||||
|
||||
event_mutex.lock();
|
||||
pending_events.fetch_sub(1, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
basic_lock.lock();
|
||||
global_timer = GetGlobalTimeNs().count();
|
||||
}
|
||||
|
||||
@@ -213,26 +245,34 @@ std::optional<s64> CoreTiming::Advance() {
|
||||
}
|
||||
|
||||
void CoreTiming::ThreadLoop() {
|
||||
const auto predicate = [this] { return !event_queue.empty() || is_paused; };
|
||||
has_started = true;
|
||||
while (!shutting_down) {
|
||||
while (!paused) {
|
||||
paused_set = false;
|
||||
while (!is_paused && !shutting_down) {
|
||||
const auto next_time = Advance();
|
||||
if (next_time) {
|
||||
if (*next_time > 0) {
|
||||
std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
|
||||
event.WaitFor(next_time_ns);
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
event_cv.wait_for(main_lock, next_time_ns, predicate);
|
||||
}
|
||||
} else {
|
||||
wait_set = true;
|
||||
event.Wait();
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
event_cv.wait(main_lock, predicate);
|
||||
}
|
||||
wait_set = false;
|
||||
}
|
||||
paused_set = true;
|
||||
clock->Pause(true);
|
||||
pause_event.Wait();
|
||||
clock->Pause(false);
|
||||
std::unique_lock main_lock(event_mutex);
|
||||
pause_count++;
|
||||
if (pause_count == worker_threads.size()) {
|
||||
clock->Pause(true);
|
||||
wait_signal_cv.notify_all();
|
||||
}
|
||||
wait_pause_cv.wait(main_lock, [this] { return !is_paused || shutting_down; });
|
||||
pause_count--;
|
||||
if (pause_count == 0) {
|
||||
clock->Pause(false);
|
||||
wait_signal_cv.notify_all();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <condition_variable>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
@@ -14,7 +15,6 @@
|
||||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/thread.h"
|
||||
#include "common/wall_clock.h"
|
||||
|
||||
namespace Core::Timing {
|
||||
@@ -131,7 +131,7 @@ private:
|
||||
/// Clear all pending events. This should ONLY be done on exit.
|
||||
void ClearPendingEvents();
|
||||
|
||||
static void ThreadEntry(CoreTiming& instance);
|
||||
static void ThreadEntry(CoreTiming& instance, size_t id);
|
||||
void ThreadLoop();
|
||||
|
||||
std::unique_ptr<Common::WallClock> clock;
|
||||
@@ -144,21 +144,24 @@ private:
|
||||
// accomodated by the standard adaptor class.
|
||||
std::vector<Event> event_queue;
|
||||
u64 event_fifo_id = 0;
|
||||
std::atomic<size_t> pending_events{};
|
||||
|
||||
std::shared_ptr<EventType> ev_lost;
|
||||
Common::Event event{};
|
||||
Common::Event pause_event{};
|
||||
std::mutex basic_lock;
|
||||
std::mutex advance_lock;
|
||||
std::unique_ptr<std::thread> timer_thread;
|
||||
std::atomic<bool> paused{};
|
||||
std::atomic<bool> paused_set{};
|
||||
std::atomic<bool> wait_set{};
|
||||
std::atomic<bool> shutting_down{};
|
||||
std::atomic<bool> has_started{};
|
||||
std::function<void()> on_thread_init{};
|
||||
|
||||
std::vector<std::thread> worker_threads;
|
||||
|
||||
std::condition_variable event_cv;
|
||||
std::condition_variable wait_pause_cv;
|
||||
std::condition_variable wait_signal_cv;
|
||||
mutable std::mutex event_mutex;
|
||||
|
||||
std::atomic<bool> paused_state{};
|
||||
bool is_paused{};
|
||||
bool shutting_down{};
|
||||
bool is_multicore{};
|
||||
size_t pause_count{};
|
||||
|
||||
/// Cycle timing
|
||||
u64 ticks{};
|
||||
|
||||
@@ -21,23 +21,24 @@ CpuManager::~CpuManager() = default;
|
||||
|
||||
void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
|
||||
std::size_t core) {
|
||||
cpu_manager.RunThread(stop_token, core);
|
||||
cpu_manager.RunThread(core);
|
||||
}
|
||||
|
||||
void CpuManager::Initialize() {
|
||||
running_mode = true;
|
||||
if (is_multicore) {
|
||||
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
|
||||
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
|
||||
}
|
||||
} else {
|
||||
core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
|
||||
num_cores = is_multicore ? Core::Hardware::NUM_CPU_CORES : 1;
|
||||
gpu_barrier = std::make_unique<Common::Barrier>(num_cores + 1);
|
||||
|
||||
for (std::size_t core = 0; core < num_cores; core++) {
|
||||
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
|
||||
}
|
||||
}
|
||||
|
||||
void CpuManager::Shutdown() {
|
||||
running_mode = false;
|
||||
Pause(false);
|
||||
for (std::size_t core = 0; core < num_cores; core++) {
|
||||
if (core_data[core].host_thread.joinable()) {
|
||||
core_data[core].host_thread.join();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
|
||||
@@ -48,8 +49,8 @@ std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
|
||||
return IdleThreadFunction;
|
||||
}
|
||||
|
||||
std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
|
||||
return SuspendThreadFunction;
|
||||
std::function<void(void*)> CpuManager::GetShutdownThreadStartFunc() {
|
||||
return ShutdownThreadFunction;
|
||||
}
|
||||
|
||||
void CpuManager::GuestThreadFunction(void* cpu_manager_) {
|
||||
@@ -79,17 +80,12 @@ void CpuManager::IdleThreadFunction(void* cpu_manager_) {
|
||||
}
|
||||
}
|
||||
|
||||
void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
|
||||
CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
|
||||
if (cpu_manager->is_multicore) {
|
||||
cpu_manager->MultiCoreRunSuspendThread();
|
||||
} else {
|
||||
cpu_manager->SingleCoreRunSuspendThread();
|
||||
}
|
||||
void CpuManager::ShutdownThreadFunction(void* cpu_manager) {
|
||||
static_cast<CpuManager*>(cpu_manager)->ShutdownThread();
|
||||
}
|
||||
|
||||
void* CpuManager::GetStartFuncParamater() {
|
||||
return static_cast<void*>(this);
|
||||
void* CpuManager::GetStartFuncParameter() {
|
||||
return this;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
@@ -99,7 +95,7 @@ void* CpuManager::GetStartFuncParamater() {
|
||||
void CpuManager::MultiCoreRunGuestThread() {
|
||||
auto& kernel = system.Kernel();
|
||||
kernel.CurrentScheduler()->OnThreadStart();
|
||||
auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
|
||||
auto& host_context = thread->GetHostContext();
|
||||
host_context->SetRewindPoint(GuestRewindFunction, this);
|
||||
MultiCoreRunGuestLoop();
|
||||
@@ -110,12 +106,10 @@ void CpuManager::MultiCoreRunGuestLoop() {
|
||||
|
||||
while (true) {
|
||||
auto* physical_core = &kernel.CurrentPhysicalCore();
|
||||
system.EnterDynarmicProfile();
|
||||
while (!physical_core->IsInterrupted()) {
|
||||
physical_core->Run();
|
||||
physical_core = &kernel.CurrentPhysicalCore();
|
||||
}
|
||||
system.ExitDynarmicProfile();
|
||||
{
|
||||
Kernel::KScopedDisableDispatch dd(kernel);
|
||||
physical_core->ArmInterface().ClearExclusiveState();
|
||||
@@ -131,58 +125,6 @@ void CpuManager::MultiCoreRunIdleThread() {
|
||||
}
|
||||
}
|
||||
|
||||
void CpuManager::MultiCoreRunSuspendThread() {
|
||||
auto& kernel = system.Kernel();
|
||||
kernel.CurrentScheduler()->OnThreadStart();
|
||||
while (true) {
|
||||
auto core = kernel.CurrentPhysicalCoreIndex();
|
||||
auto& scheduler = *kernel.CurrentScheduler();
|
||||
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
|
||||
ASSERT(scheduler.ContextSwitchPending());
|
||||
ASSERT(core == kernel.CurrentPhysicalCoreIndex());
|
||||
scheduler.RescheduleCurrentCore();
|
||||
}
|
||||
}
|
||||
|
||||
void CpuManager::MultiCorePause(bool paused) {
|
||||
if (!paused) {
|
||||
bool all_not_barrier = false;
|
||||
while (!all_not_barrier) {
|
||||
all_not_barrier = true;
|
||||
for (const auto& data : core_data) {
|
||||
all_not_barrier &= !data.is_running.load() && data.initialized.load();
|
||||
}
|
||||
}
|
||||
for (auto& data : core_data) {
|
||||
data.enter_barrier->Set();
|
||||
}
|
||||
if (paused_state.load()) {
|
||||
bool all_barrier = false;
|
||||
while (!all_barrier) {
|
||||
all_barrier = true;
|
||||
for (const auto& data : core_data) {
|
||||
all_barrier &= data.is_paused.load() && data.initialized.load();
|
||||
}
|
||||
}
|
||||
for (auto& data : core_data) {
|
||||
data.exit_barrier->Set();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/// Wait until all cores are paused.
|
||||
bool all_barrier = false;
|
||||
while (!all_barrier) {
|
||||
all_barrier = true;
|
||||
for (const auto& data : core_data) {
|
||||
all_barrier &= data.is_paused.load() && data.initialized.load();
|
||||
}
|
||||
}
|
||||
/// Don't release the barrier
|
||||
}
|
||||
paused_state = paused;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
/// SingleCore ///
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
@@ -190,7 +132,7 @@ void CpuManager::MultiCorePause(bool paused) {
|
||||
void CpuManager::SingleCoreRunGuestThread() {
|
||||
auto& kernel = system.Kernel();
|
||||
kernel.CurrentScheduler()->OnThreadStart();
|
||||
auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
|
||||
auto& host_context = thread->GetHostContext();
|
||||
host_context->SetRewindPoint(GuestRewindFunction, this);
|
||||
SingleCoreRunGuestLoop();
|
||||
@@ -200,12 +142,10 @@ void CpuManager::SingleCoreRunGuestLoop() {
|
||||
auto& kernel = system.Kernel();
|
||||
while (true) {
|
||||
auto* physical_core = &kernel.CurrentPhysicalCore();
|
||||
system.EnterDynarmicProfile();
|
||||
if (!physical_core->IsInterrupted()) {
|
||||
physical_core->Run();
|
||||
physical_core = &kernel.CurrentPhysicalCore();
|
||||
}
|
||||
system.ExitDynarmicProfile();
|
||||
kernel.SetIsPhantomModeForSingleCore(true);
|
||||
system.CoreTiming().Advance();
|
||||
kernel.SetIsPhantomModeForSingleCore(false);
|
||||
@@ -228,25 +168,11 @@ void CpuManager::SingleCoreRunIdleThread() {
|
||||
}
|
||||
}
|
||||
|
||||
void CpuManager::SingleCoreRunSuspendThread() {
|
||||
auto& kernel = system.Kernel();
|
||||
kernel.CurrentScheduler()->OnThreadStart();
|
||||
while (true) {
|
||||
auto core = kernel.GetCurrentHostThreadID();
|
||||
auto& scheduler = *kernel.CurrentScheduler();
|
||||
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
|
||||
ASSERT(scheduler.ContextSwitchPending());
|
||||
ASSERT(core == kernel.GetCurrentHostThreadID());
|
||||
scheduler.RescheduleCurrentCore();
|
||||
}
|
||||
}
|
||||
|
||||
void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
|
||||
{
|
||||
auto& kernel = system.Kernel();
|
||||
auto& scheduler = kernel.Scheduler(current_core);
|
||||
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
|
||||
Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread();
|
||||
if (idle_count >= 4 || from_running_enviroment) {
|
||||
if (!from_running_enviroment) {
|
||||
system.CoreTiming().Idle();
|
||||
@@ -258,7 +184,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
|
||||
}
|
||||
current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
|
||||
system.CoreTiming().ResetTicks();
|
||||
scheduler.Unload(scheduler.GetCurrentThread());
|
||||
scheduler.Unload(scheduler.GetSchedulerCurrentThread());
|
||||
|
||||
auto& next_scheduler = kernel.Scheduler(current_core);
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext());
|
||||
@@ -267,47 +193,23 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
|
||||
// May have changed scheduler
|
||||
{
|
||||
auto& scheduler = system.Kernel().Scheduler(current_core);
|
||||
scheduler.Reload(scheduler.GetCurrentThread());
|
||||
scheduler.Reload(scheduler.GetSchedulerCurrentThread());
|
||||
if (!scheduler.IsIdle()) {
|
||||
idle_count = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CpuManager::SingleCorePause(bool paused) {
|
||||
if (!paused) {
|
||||
bool all_not_barrier = false;
|
||||
while (!all_not_barrier) {
|
||||
all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
|
||||
}
|
||||
core_data[0].enter_barrier->Set();
|
||||
if (paused_state.load()) {
|
||||
bool all_barrier = false;
|
||||
while (!all_barrier) {
|
||||
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
|
||||
}
|
||||
core_data[0].exit_barrier->Set();
|
||||
}
|
||||
} else {
|
||||
/// Wait until all cores are paused.
|
||||
bool all_barrier = false;
|
||||
while (!all_barrier) {
|
||||
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
|
||||
}
|
||||
/// Don't release the barrier
|
||||
}
|
||||
paused_state = paused;
|
||||
void CpuManager::ShutdownThread() {
|
||||
auto& kernel = system.Kernel();
|
||||
auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0;
|
||||
auto* current_thread = kernel.GetCurrentEmuThread();
|
||||
|
||||
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
void CpuManager::Pause(bool paused) {
|
||||
if (is_multicore) {
|
||||
MultiCorePause(paused);
|
||||
} else {
|
||||
SingleCorePause(paused);
|
||||
}
|
||||
}
|
||||
|
||||
void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
|
||||
void CpuManager::RunThread(std::size_t core) {
|
||||
/// Initialization
|
||||
system.RegisterCoreThread(core);
|
||||
std::string name;
|
||||
@@ -320,45 +222,24 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
|
||||
Common::SetCurrentThreadName(name.c_str());
|
||||
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
|
||||
auto& data = core_data[core];
|
||||
data.enter_barrier = std::make_unique<Common::Event>();
|
||||
data.exit_barrier = std::make_unique<Common::Event>();
|
||||
data.host_context = Common::Fiber::ThreadToFiber();
|
||||
data.is_running = false;
|
||||
data.initialized = true;
|
||||
const bool sc_sync = !is_async_gpu && !is_multicore;
|
||||
bool sc_sync_first_use = sc_sync;
|
||||
|
||||
// Cleanup
|
||||
SCOPE_EXIT({
|
||||
data.host_context->Exit();
|
||||
data.enter_barrier.reset();
|
||||
data.exit_barrier.reset();
|
||||
data.initialized = false;
|
||||
MicroProfileOnThreadExit();
|
||||
});
|
||||
|
||||
/// Running
|
||||
while (running_mode) {
|
||||
data.is_running = false;
|
||||
data.enter_barrier->Wait();
|
||||
if (sc_sync_first_use) {
|
||||
system.GPU().ObtainContext();
|
||||
sc_sync_first_use = false;
|
||||
}
|
||||
// Running
|
||||
gpu_barrier->Sync();
|
||||
|
||||
// Emulation was stopped
|
||||
if (stop_token.stop_requested()) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
|
||||
data.is_running = true;
|
||||
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
|
||||
data.is_running = false;
|
||||
data.is_paused = true;
|
||||
data.exit_barrier->Wait();
|
||||
data.is_paused = false;
|
||||
if (!is_async_gpu && !is_multicore) {
|
||||
system.GPU().ObtainContext();
|
||||
}
|
||||
|
||||
auto* current_thread = system.Kernel().CurrentScheduler()->GetIdleThread();
|
||||
Kernel::SetCurrentThread(system.Kernel(), current_thread);
|
||||
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
|
||||
}
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -43,15 +43,17 @@ public:
|
||||
is_async_gpu = is_async;
|
||||
}
|
||||
|
||||
void OnGpuReady() {
|
||||
gpu_barrier->Sync();
|
||||
}
|
||||
|
||||
void Initialize();
|
||||
void Shutdown();
|
||||
|
||||
void Pause(bool paused);
|
||||
|
||||
static std::function<void(void*)> GetGuestThreadStartFunc();
|
||||
static std::function<void(void*)> GetIdleThreadStartFunc();
|
||||
static std::function<void(void*)> GetSuspendThreadStartFunc();
|
||||
void* GetStartFuncParamater();
|
||||
static std::function<void(void*)> GetShutdownThreadStartFunc();
|
||||
void* GetStartFuncParameter();
|
||||
|
||||
void PreemptSingleCore(bool from_running_enviroment = true);
|
||||
|
||||
@@ -63,43 +65,34 @@ private:
|
||||
static void GuestThreadFunction(void* cpu_manager);
|
||||
static void GuestRewindFunction(void* cpu_manager);
|
||||
static void IdleThreadFunction(void* cpu_manager);
|
||||
static void SuspendThreadFunction(void* cpu_manager);
|
||||
static void ShutdownThreadFunction(void* cpu_manager);
|
||||
|
||||
void MultiCoreRunGuestThread();
|
||||
void MultiCoreRunGuestLoop();
|
||||
void MultiCoreRunIdleThread();
|
||||
void MultiCoreRunSuspendThread();
|
||||
void MultiCorePause(bool paused);
|
||||
|
||||
void SingleCoreRunGuestThread();
|
||||
void SingleCoreRunGuestLoop();
|
||||
void SingleCoreRunIdleThread();
|
||||
void SingleCoreRunSuspendThread();
|
||||
void SingleCorePause(bool paused);
|
||||
|
||||
static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
|
||||
|
||||
void RunThread(std::stop_token stop_token, std::size_t core);
|
||||
void ShutdownThread();
|
||||
void RunThread(std::size_t core);
|
||||
|
||||
struct CoreData {
|
||||
std::shared_ptr<Common::Fiber> host_context;
|
||||
std::unique_ptr<Common::Event> enter_barrier;
|
||||
std::unique_ptr<Common::Event> exit_barrier;
|
||||
std::atomic<bool> is_running;
|
||||
std::atomic<bool> is_paused;
|
||||
std::atomic<bool> initialized;
|
||||
std::jthread host_thread;
|
||||
};
|
||||
|
||||
std::atomic<bool> running_mode{};
|
||||
std::atomic<bool> paused_state{};
|
||||
|
||||
std::unique_ptr<Common::Barrier> gpu_barrier{};
|
||||
std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};
|
||||
|
||||
bool is_async_gpu{};
|
||||
bool is_multicore{};
|
||||
std::atomic<std::size_t> current_core{};
|
||||
std::size_t idle_count{};
|
||||
std::size_t num_cores{};
|
||||
static constexpr std::size_t max_cycle_runs = 5;
|
||||
|
||||
System& system;
|
||||
|
||||
@@ -140,7 +140,6 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
|
||||
return 0x3C;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 GetSignatureTypePaddingSize(SignatureType type) {
|
||||
@@ -155,7 +154,6 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
|
||||
return 0x40;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
SignatureType Ticket::GetSignatureType() const {
|
||||
|
||||
@@ -42,6 +42,18 @@ static std::span<const u8> ReceiveInto(Readable& r, Buffer& buffer) {
|
||||
return received_data;
|
||||
}
|
||||
|
||||
enum class SignalType {
|
||||
Stopped,
|
||||
Watchpoint,
|
||||
ShuttingDown,
|
||||
};
|
||||
|
||||
struct SignalInfo {
|
||||
SignalType type;
|
||||
Kernel::KThread* thread;
|
||||
const Kernel::DebugWatchpoint* watchpoint;
|
||||
};
|
||||
|
||||
namespace Core {
|
||||
|
||||
class DebuggerImpl : public DebuggerBackend {
|
||||
@@ -56,17 +68,23 @@ public:
|
||||
ShutdownServer();
|
||||
}
|
||||
|
||||
bool NotifyThreadStopped(Kernel::KThread* thread) {
|
||||
std::scoped_lock lk{connection_lock};
|
||||
bool SignalDebugger(SignalInfo signal_info) {
|
||||
{
|
||||
std::scoped_lock lk{connection_lock};
|
||||
|
||||
if (stopped) {
|
||||
// Do not notify the debugger about another event.
|
||||
// It should be ignored.
|
||||
return false;
|
||||
if (stopped) {
|
||||
// Do not notify the debugger about another event.
|
||||
// It should be ignored.
|
||||
return false;
|
||||
}
|
||||
|
||||
// Set up the state.
|
||||
stopped = true;
|
||||
info = signal_info;
|
||||
}
|
||||
stopped = true;
|
||||
|
||||
boost::asio::write(signal_pipe, boost::asio::buffer(&thread, sizeof(thread)));
|
||||
// Write a single byte into the pipe to wake up the debug interface.
|
||||
boost::asio::write(signal_pipe, boost::asio::buffer(&stopped, sizeof(stopped)));
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -96,7 +114,7 @@ private:
|
||||
connection_thread = std::jthread([&, port](std::stop_token stop_token) {
|
||||
try {
|
||||
// Initialize the listening socket and accept a new client.
|
||||
tcp::endpoint endpoint{boost::asio::ip::address_v4::loopback(), port};
|
||||
tcp::endpoint endpoint{boost::asio::ip::address_v4::any(), port};
|
||||
tcp::acceptor acceptor{io_context, endpoint};
|
||||
|
||||
acceptor.async_accept(client_socket, [](const auto&) {});
|
||||
@@ -124,12 +142,9 @@ private:
|
||||
Common::SetCurrentThreadName("yuzu:Debugger");
|
||||
|
||||
// Set up the client signals for new data.
|
||||
AsyncReceiveInto(signal_pipe, active_thread, [&](auto d) { PipeData(d); });
|
||||
AsyncReceiveInto(signal_pipe, pipe_data, [&](auto d) { PipeData(d); });
|
||||
AsyncReceiveInto(client_socket, client_data, [&](auto d) { ClientData(d); });
|
||||
|
||||
// Stop the emulated CPU.
|
||||
AllCoreStop();
|
||||
|
||||
// Set the active thread.
|
||||
UpdateActiveThread();
|
||||
|
||||
@@ -142,9 +157,33 @@ private:
|
||||
}
|
||||
|
||||
void PipeData(std::span<const u8> data) {
|
||||
AllCoreStop();
|
||||
UpdateActiveThread();
|
||||
frontend->Stopped(active_thread);
|
||||
switch (info.type) {
|
||||
case SignalType::Stopped:
|
||||
case SignalType::Watchpoint:
|
||||
// Stop emulation.
|
||||
PauseEmulation();
|
||||
|
||||
// Notify the client.
|
||||
active_thread = info.thread;
|
||||
UpdateActiveThread();
|
||||
|
||||
if (info.type == SignalType::Watchpoint) {
|
||||
frontend->Watchpoint(active_thread, *info.watchpoint);
|
||||
} else {
|
||||
frontend->Stopped(active_thread);
|
||||
}
|
||||
|
||||
break;
|
||||
case SignalType::ShuttingDown:
|
||||
frontend->ShuttingDown();
|
||||
|
||||
// Wait for emulation to shut down gracefully now.
|
||||
signal_pipe.close();
|
||||
client_socket.shutdown(boost::asio::socket_base::shutdown_both);
|
||||
LOG_INFO(Debug_GDBStub, "Shut down server");
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void ClientData(std::span<const u8> data) {
|
||||
@@ -156,32 +195,29 @@ private:
|
||||
std::scoped_lock lk{connection_lock};
|
||||
stopped = true;
|
||||
}
|
||||
AllCoreStop();
|
||||
PauseEmulation();
|
||||
UpdateActiveThread();
|
||||
frontend->Stopped(active_thread);
|
||||
break;
|
||||
}
|
||||
case DebuggerAction::Continue:
|
||||
active_thread->SetStepState(Kernel::StepState::NotStepping);
|
||||
ResumeInactiveThreads();
|
||||
AllCoreResume();
|
||||
MarkResumed([&] { ResumeEmulation(); });
|
||||
break;
|
||||
case DebuggerAction::StepThreadUnlocked:
|
||||
active_thread->SetStepState(Kernel::StepState::StepPending);
|
||||
ResumeInactiveThreads();
|
||||
AllCoreResume();
|
||||
MarkResumed([&] {
|
||||
active_thread->SetStepState(Kernel::StepState::StepPending);
|
||||
active_thread->Resume(Kernel::SuspendType::Debug);
|
||||
ResumeEmulation(active_thread);
|
||||
});
|
||||
break;
|
||||
case DebuggerAction::StepThreadLocked:
|
||||
active_thread->SetStepState(Kernel::StepState::StepPending);
|
||||
SuspendInactiveThreads();
|
||||
AllCoreResume();
|
||||
case DebuggerAction::StepThreadLocked: {
|
||||
MarkResumed([&] {
|
||||
active_thread->SetStepState(Kernel::StepState::StepPending);
|
||||
active_thread->Resume(Kernel::SuspendType::Debug);
|
||||
});
|
||||
break;
|
||||
}
|
||||
case DebuggerAction::ShutdownEmulation: {
|
||||
// Suspend all threads and release any locks held
|
||||
active_thread->RequestSuspend(Kernel::SuspendType::Debug);
|
||||
SuspendInactiveThreads();
|
||||
AllCoreResume();
|
||||
|
||||
// Spawn another thread that will exit after shutdown,
|
||||
// to avoid a deadlock
|
||||
Core::System* system_ref{&system};
|
||||
@@ -193,33 +229,33 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
void AllCoreStop() {
|
||||
if (!suspend) {
|
||||
suspend = system.StallCPU();
|
||||
void PauseEmulation() {
|
||||
// Put all threads to sleep on next scheduler round.
|
||||
for (auto* thread : ThreadList()) {
|
||||
thread->RequestSuspend(Kernel::SuspendType::Debug);
|
||||
}
|
||||
|
||||
// Signal an interrupt so that scheduler will fire.
|
||||
system.Kernel().InterruptAllPhysicalCores();
|
||||
}
|
||||
|
||||
void ResumeEmulation(Kernel::KThread* except = nullptr) {
|
||||
// Wake up all threads.
|
||||
for (auto* thread : ThreadList()) {
|
||||
if (thread == except) {
|
||||
continue;
|
||||
}
|
||||
|
||||
thread->SetStepState(Kernel::StepState::NotStepping);
|
||||
thread->Resume(Kernel::SuspendType::Debug);
|
||||
}
|
||||
}
|
||||
|
||||
void AllCoreResume() {
|
||||
template <typename Callback>
|
||||
void MarkResumed(Callback&& cb) {
|
||||
std::scoped_lock lk{connection_lock};
|
||||
stopped = false;
|
||||
system.UnstallCPU();
|
||||
suspend.reset();
|
||||
}
|
||||
|
||||
void SuspendInactiveThreads() {
|
||||
for (auto* thread : ThreadList()) {
|
||||
if (thread != active_thread) {
|
||||
thread->RequestSuspend(Kernel::SuspendType::Debug);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ResumeInactiveThreads() {
|
||||
for (auto* thread : ThreadList()) {
|
||||
if (thread != active_thread) {
|
||||
thread->Resume(Kernel::SuspendType::Debug);
|
||||
thread->SetStepState(Kernel::StepState::NotStepping);
|
||||
}
|
||||
}
|
||||
cb();
|
||||
}
|
||||
|
||||
void UpdateActiveThread() {
|
||||
@@ -227,8 +263,6 @@ private:
|
||||
if (std::find(threads.begin(), threads.end(), active_thread) == threads.end()) {
|
||||
active_thread = threads[0];
|
||||
}
|
||||
active_thread->Resume(Kernel::SuspendType::Debug);
|
||||
active_thread->SetStepState(Kernel::StepState::NotStepping);
|
||||
}
|
||||
|
||||
const std::vector<Kernel::KThread*>& ThreadList() {
|
||||
@@ -244,9 +278,10 @@ private:
|
||||
boost::asio::io_context io_context;
|
||||
boost::process::async_pipe signal_pipe;
|
||||
boost::asio::ip::tcp::socket client_socket;
|
||||
std::optional<std::unique_lock<std::mutex>> suspend;
|
||||
|
||||
SignalInfo info;
|
||||
Kernel::KThread* active_thread;
|
||||
bool pipe_data;
|
||||
bool stopped;
|
||||
|
||||
std::array<u8, 4096> client_data;
|
||||
@@ -263,7 +298,18 @@ Debugger::Debugger(Core::System& system, u16 port) {
|
||||
Debugger::~Debugger() = default;
|
||||
|
||||
bool Debugger::NotifyThreadStopped(Kernel::KThread* thread) {
|
||||
return impl && impl->NotifyThreadStopped(thread);
|
||||
return impl && impl->SignalDebugger(SignalInfo{SignalType::Stopped, thread, nullptr});
|
||||
}
|
||||
|
||||
bool Debugger::NotifyThreadWatchpoint(Kernel::KThread* thread,
|
||||
const Kernel::DebugWatchpoint& watch) {
|
||||
return impl && impl->SignalDebugger(SignalInfo{SignalType::Watchpoint, thread, &watch});
|
||||
}
|
||||
|
||||
void Debugger::NotifyShutdown() {
|
||||
if (impl) {
|
||||
impl->SignalDebugger(SignalInfo{SignalType::ShuttingDown, nullptr, nullptr});
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -9,7 +9,8 @@
|
||||
|
||||
namespace Kernel {
|
||||
class KThread;
|
||||
}
|
||||
struct DebugWatchpoint;
|
||||
} // namespace Kernel
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
@@ -35,6 +36,16 @@ public:
|
||||
*/
|
||||
bool NotifyThreadStopped(Kernel::KThread* thread);
|
||||
|
||||
/**
|
||||
* Notify the debugger that a shutdown is being performed now and disconnect.
|
||||
*/
|
||||
void NotifyShutdown();
|
||||
|
||||
/*
|
||||
* Notify the debugger that the given thread has stopped due to hitting a watchpoint.
|
||||
*/
|
||||
bool NotifyThreadWatchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint& watch);
|
||||
|
||||
private:
|
||||
std::unique_ptr<DebuggerImpl> impl;
|
||||
};
|
||||
|
||||
@@ -11,7 +11,8 @@
|
||||
|
||||
namespace Kernel {
|
||||
class KThread;
|
||||
}
|
||||
struct DebugWatchpoint;
|
||||
} // namespace Kernel
|
||||
|
||||
namespace Core {
|
||||
|
||||
@@ -66,6 +67,16 @@ public:
|
||||
*/
|
||||
virtual void Stopped(Kernel::KThread* thread) = 0;
|
||||
|
||||
/**
|
||||
* Called when emulation is shutting down.
|
||||
*/
|
||||
virtual void ShuttingDown() = 0;
|
||||
|
||||
/*
|
||||
* Called when emulation has stopped on a watchpoint.
|
||||
*/
|
||||
virtual void Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint& watch) = 0;
|
||||
|
||||
/**
|
||||
* Called when new data is asynchronously received on the client socket.
|
||||
* A list of actions to perform is returned.
|
||||
|
||||
@@ -106,10 +106,29 @@ GDBStub::~GDBStub() = default;
|
||||
|
||||
void GDBStub::Connected() {}
|
||||
|
||||
void GDBStub::ShuttingDown() {}
|
||||
|
||||
void GDBStub::Stopped(Kernel::KThread* thread) {
|
||||
SendReply(arch->ThreadStatus(thread, GDB_STUB_SIGTRAP));
|
||||
}
|
||||
|
||||
void GDBStub::Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint& watch) {
|
||||
const auto status{arch->ThreadStatus(thread, GDB_STUB_SIGTRAP)};
|
||||
|
||||
switch (watch.type) {
|
||||
case Kernel::DebugWatchpointType::Read:
|
||||
SendReply(fmt::format("{}rwatch:{:x};", status, watch.start_address));
|
||||
break;
|
||||
case Kernel::DebugWatchpointType::Write:
|
||||
SendReply(fmt::format("{}watch:{:x};", status, watch.start_address));
|
||||
break;
|
||||
case Kernel::DebugWatchpointType::ReadOrWrite:
|
||||
default:
|
||||
SendReply(fmt::format("{}awatch:{:x};", status, watch.start_address));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<DebuggerAction> GDBStub::ClientData(std::span<const u8> data) {
|
||||
std::vector<DebuggerAction> actions;
|
||||
current_command.insert(current_command.end(), data.begin(), data.end());
|
||||
@@ -233,6 +252,7 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
|
||||
const auto sep{std::find(command.begin(), command.end(), '=') - command.begin() + 1};
|
||||
const size_t reg{static_cast<size_t>(strtoll(command.data(), nullptr, 16))};
|
||||
arch->RegWrite(backend.GetActiveThread(), reg, std::string_view(command).substr(sep));
|
||||
SendReply(GDB_STUB_REPLY_OK);
|
||||
break;
|
||||
}
|
||||
case 'm': {
|
||||
@@ -276,44 +296,124 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
|
||||
case 'c':
|
||||
actions.push_back(DebuggerAction::Continue);
|
||||
break;
|
||||
case 'Z': {
|
||||
const auto addr_sep{std::find(command.begin(), command.end(), ',') - command.begin() + 1};
|
||||
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
|
||||
|
||||
if (system.Memory().IsValidVirtualAddress(addr)) {
|
||||
replaced_instructions[addr] = system.Memory().Read32(addr);
|
||||
system.Memory().Write32(addr, arch->BreakpointInstruction());
|
||||
system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
|
||||
|
||||
SendReply(GDB_STUB_REPLY_OK);
|
||||
} else {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
}
|
||||
case 'Z':
|
||||
HandleBreakpointInsert(command);
|
||||
break;
|
||||
}
|
||||
case 'z': {
|
||||
const auto addr_sep{std::find(command.begin(), command.end(), ',') - command.begin() + 1};
|
||||
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
|
||||
|
||||
const auto orig_insn{replaced_instructions.find(addr)};
|
||||
if (system.Memory().IsValidVirtualAddress(addr) &&
|
||||
orig_insn != replaced_instructions.end()) {
|
||||
system.Memory().Write32(addr, orig_insn->second);
|
||||
system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
|
||||
replaced_instructions.erase(addr);
|
||||
|
||||
SendReply(GDB_STUB_REPLY_OK);
|
||||
} else {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
}
|
||||
case 'z':
|
||||
HandleBreakpointRemove(command);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
SendReply(GDB_STUB_REPLY_EMPTY);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
enum class BreakpointType {
|
||||
Software = 0,
|
||||
Hardware = 1,
|
||||
WriteWatch = 2,
|
||||
ReadWatch = 3,
|
||||
AccessWatch = 4,
|
||||
};
|
||||
|
||||
void GDBStub::HandleBreakpointInsert(std::string_view command) {
|
||||
const auto type{static_cast<BreakpointType>(strtoll(command.data(), nullptr, 16))};
|
||||
const auto addr_sep{std::find(command.begin(), command.end(), ',') - command.begin() + 1};
|
||||
const auto size_sep{std::find(command.begin() + addr_sep, command.end(), ',') -
|
||||
command.begin() + 1};
|
||||
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
|
||||
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
|
||||
|
||||
if (!system.Memory().IsValidVirtualAddressRange(addr, size)) {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
return;
|
||||
}
|
||||
|
||||
bool success{};
|
||||
|
||||
switch (type) {
|
||||
case BreakpointType::Software:
|
||||
replaced_instructions[addr] = system.Memory().Read32(addr);
|
||||
system.Memory().Write32(addr, arch->BreakpointInstruction());
|
||||
system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
|
||||
success = true;
|
||||
break;
|
||||
case BreakpointType::WriteWatch:
|
||||
success = system.CurrentProcess()->InsertWatchpoint(system, addr, size,
|
||||
Kernel::DebugWatchpointType::Write);
|
||||
break;
|
||||
case BreakpointType::ReadWatch:
|
||||
success = system.CurrentProcess()->InsertWatchpoint(system, addr, size,
|
||||
Kernel::DebugWatchpointType::Read);
|
||||
break;
|
||||
case BreakpointType::AccessWatch:
|
||||
success = system.CurrentProcess()->InsertWatchpoint(
|
||||
system, addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
|
||||
break;
|
||||
case BreakpointType::Hardware:
|
||||
default:
|
||||
SendReply(GDB_STUB_REPLY_EMPTY);
|
||||
return;
|
||||
}
|
||||
|
||||
if (success) {
|
||||
SendReply(GDB_STUB_REPLY_OK);
|
||||
} else {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
}
|
||||
}
|
||||
|
||||
void GDBStub::HandleBreakpointRemove(std::string_view command) {
|
||||
const auto type{static_cast<BreakpointType>(strtoll(command.data(), nullptr, 16))};
|
||||
const auto addr_sep{std::find(command.begin(), command.end(), ',') - command.begin() + 1};
|
||||
const auto size_sep{std::find(command.begin() + addr_sep, command.end(), ',') -
|
||||
command.begin() + 1};
|
||||
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
|
||||
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
|
||||
|
||||
if (!system.Memory().IsValidVirtualAddressRange(addr, size)) {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
return;
|
||||
}
|
||||
|
||||
bool success{};
|
||||
|
||||
switch (type) {
|
||||
case BreakpointType::Software: {
|
||||
const auto orig_insn{replaced_instructions.find(addr)};
|
||||
if (orig_insn != replaced_instructions.end()) {
|
||||
system.Memory().Write32(addr, orig_insn->second);
|
||||
system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
|
||||
replaced_instructions.erase(addr);
|
||||
success = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BreakpointType::WriteWatch:
|
||||
success = system.CurrentProcess()->RemoveWatchpoint(system, addr, size,
|
||||
Kernel::DebugWatchpointType::Write);
|
||||
break;
|
||||
case BreakpointType::ReadWatch:
|
||||
success = system.CurrentProcess()->RemoveWatchpoint(system, addr, size,
|
||||
Kernel::DebugWatchpointType::Read);
|
||||
break;
|
||||
case BreakpointType::AccessWatch:
|
||||
success = system.CurrentProcess()->RemoveWatchpoint(
|
||||
system, addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
|
||||
break;
|
||||
case BreakpointType::Hardware:
|
||||
default:
|
||||
SendReply(GDB_STUB_REPLY_EMPTY);
|
||||
return;
|
||||
}
|
||||
|
||||
if (success) {
|
||||
SendReply(GDB_STUB_REPLY_OK);
|
||||
} else {
|
||||
SendReply(GDB_STUB_REPLY_ERR);
|
||||
}
|
||||
}
|
||||
|
||||
// Structure offsets are from Atmosphere
|
||||
// See osdbg_thread_local_region.os.horizon.hpp and osdbg_thread_type.os.horizon.hpp
|
||||
|
||||
|
||||
@@ -23,6 +23,8 @@ public:
    void Connected() override;
    void Stopped(Kernel::KThread* thread) override;
    void ShuttingDown() override;
    void Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint& watch) override;
    std::vector<DebuggerAction> ClientData(std::span<const u8> data) override;

private:
@@ -30,6 +32,8 @@ private:
    void ExecuteCommand(std::string_view packet, std::vector<DebuggerAction>& actions);
    void HandleVCont(std::string_view command, std::vector<DebuggerAction>& actions);
    void HandleQuery(std::string_view command);
    void HandleBreakpointInsert(std::string_view command);
    void HandleBreakpointRemove(std::string_view command);
    std::vector<char>::const_iterator CommandEnd() const;
    std::optional<std::string> DetachCommand();
    Kernel::KThread* GetThreadByID(u64 thread_id);
@@ -419,7 +419,7 @@ std::optional<Core::Crypto::Key128> NCA::GetKeyAreaKey(NCASectionCryptoType type
                              Core::Crypto::Mode::ECB);
    cipher.Transcode(key_area.data(), key_area.size(), key_area.data(), Core::Crypto::Op::Decrypt);

    Core::Crypto::Key128 out;
    Core::Crypto::Key128 out{};
    if (type == NCASectionCryptoType::XTS) {
        std::copy(key_area.begin(), key_area.begin() + 0x10, out.begin());
    } else if (type == NCASectionCryptoType::CTR || type == NCASectionCryptoType::BKTR) {
@@ -8,14 +8,14 @@

namespace FileSys {

constexpr ResultCode ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1};
constexpr ResultCode ERROR_PATH_ALREADY_EXISTS{ErrorModule::FS, 2};
constexpr ResultCode ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002};
constexpr ResultCode ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001};
constexpr ResultCode ERROR_OUT_OF_BOUNDS{ErrorModule::FS, 3005};
constexpr ResultCode ERROR_FAILED_MOUNT_ARCHIVE{ErrorModule::FS, 3223};
constexpr ResultCode ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
constexpr ResultCode ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
constexpr ResultCode ERROR_INVALID_SIZE{ErrorModule::FS, 6062};
constexpr Result ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1};
constexpr Result ERROR_PATH_ALREADY_EXISTS{ErrorModule::FS, 2};
constexpr Result ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002};
constexpr Result ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001};
constexpr Result ERROR_OUT_OF_BOUNDS{ErrorModule::FS, 3005};
constexpr Result ERROR_FAILED_MOUNT_ARCHIVE{ErrorModule::FS, 3223};
constexpr Result ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
constexpr Result ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
constexpr Result ERROR_INVALID_SIZE{ErrorModule::FS, 6062};

} // namespace FileSys
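Each of these constants packs a module and a description into one 32-bit code; in yuzu the type is a union over a raw `u32` with bit fields. A rough standalone sketch of the encoding, assuming the usual Horizon layout (module in bits 0-8, description in bits 9-21); the exact field widths are an assumption here:

```cpp
#include <cstdint>
#include <cstdio>

// Assumed layout: module in bits 0-8, description in bits 9-21.
constexpr std::uint32_t MakeResult(std::uint32_t module, std::uint32_t description) {
    return (module & 0x1FF) | ((description & 0x1FFF) << 9);
}

int main() {
    // Module 2 is FS on Horizon; description 1 is "path not found".
    constexpr auto path_not_found = MakeResult(2, 1);
    std::printf("raw=0x%08X\n", path_not_found); // prints raw=0x00000202
}
```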
@@ -50,7 +50,7 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
            low = mid + 1;
        }
    }
    UNREACHABLE_MSG("Offset could not be found in BKTR block.");
    ASSERT_MSG(false, "Offset could not be found in BKTR block.");
    return {0, 0};
}
} // Anonymous namespace
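This hunk is the first instance of a change repeated throughout this comparison: `UNREACHABLE`/`UNREACHABLE_MSG`, which aborts unconditionally, is replaced by `ASSERT_MSG(false, ...)` followed by an explicit fallback return, so release builds take a defined recovery path instead of crashing. The shape of the pattern, with a plain `assert` standing in for yuzu's macro:

```cpp
#include <cassert>
#include <cstdio>

enum class Kind { A, B };

int Classify(Kind k) {
    switch (k) {
    case Kind::A:
        return 1;
    case Kind::B:
        return 2;
    }
    // Debug builds trip the assert; release builds fall through to a
    // defined default instead of aborting, as UNREACHABLE_MSG would.
    assert(false && "invalid Kind");
    return 0;
}

int main() {
    std::printf("%d\n", Classify(Kind::A));
}
```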
@@ -108,7 +108,7 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
        // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
        return ContentRecordType::HtmlDocument;
    default:
        UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type);
        ASSERT_MSG(false, "Invalid NCAContentType={:02X}", type);
        return ContentRecordType{};
    }
}

@@ -144,7 +144,7 @@ VirtualFile RealVfsFilesystem::MoveFile(std::string_view old_path_, std::string_
            LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", new_path);
        }
    } else {
        UNREACHABLE();
        ASSERT(false);
        return nullptr;
    }

@@ -65,7 +65,7 @@ void DefaultControllerApplet::ReconfigureControllers(std::function<void()> callb
        controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::Handheld);
        controller->Connect(true);
    } else {
        UNREACHABLE_MSG("Unable to add a new controller based on the given parameters!");
        ASSERT_MSG(false, "Unable to add a new controller based on the given parameters!");
    }
}
@@ -8,12 +8,12 @@ namespace Core::Frontend {

ErrorApplet::~ErrorApplet() = default;

void DefaultErrorApplet::ShowError(ResultCode error, std::function<void()> finished) const {
void DefaultErrorApplet::ShowError(Result error, std::function<void()> finished) const {
    LOG_CRITICAL(Service_Fatal, "Application requested error display: {:04}-{:04} (raw={:08X})",
                 error.module.Value(), error.description.Value(), error.raw);
}

void DefaultErrorApplet::ShowErrorWithTimestamp(ResultCode error, std::chrono::seconds time,
void DefaultErrorApplet::ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
                                                std::function<void()> finished) const {
    LOG_CRITICAL(
        Service_Fatal,
@@ -21,7 +21,7 @@ void DefaultErrorApplet::ShowErrorWithTimestamp(ResultCode error, std::chrono::s
        error.module.Value(), error.description.Value(), error.raw, time.count());
}

void DefaultErrorApplet::ShowCustomErrorText(ResultCode error, std::string main_text,
void DefaultErrorApplet::ShowCustomErrorText(Result error, std::string main_text,
                                             std::string detail_text,
                                             std::function<void()> finished) const {
    LOG_CRITICAL(Service_Fatal,

@@ -14,22 +14,22 @@ class ErrorApplet {
public:
    virtual ~ErrorApplet();

    virtual void ShowError(ResultCode error, std::function<void()> finished) const = 0;
    virtual void ShowError(Result error, std::function<void()> finished) const = 0;

    virtual void ShowErrorWithTimestamp(ResultCode error, std::chrono::seconds time,
    virtual void ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
                                        std::function<void()> finished) const = 0;

    virtual void ShowCustomErrorText(ResultCode error, std::string dialog_text,
    virtual void ShowCustomErrorText(Result error, std::string dialog_text,
                                     std::string fullscreen_text,
                                     std::function<void()> finished) const = 0;
};

class DefaultErrorApplet final : public ErrorApplet {
public:
    void ShowError(ResultCode error, std::function<void()> finished) const override;
    void ShowErrorWithTimestamp(ResultCode error, std::chrono::seconds time,
    void ShowError(Result error, std::function<void()> finished) const override;
    void ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
                                std::function<void()> finished) const override;
    void ShowCustomErrorText(ResultCode error, std::string main_text, std::string detail_text,
    void ShowCustomErrorText(Result error, std::string main_text, std::string detail_text,
                             std::function<void()> finished) const override;
};
@@ -25,6 +25,9 @@ constexpr std::array<s32, Common::BitSize<u64>()> VirtualToPhysicalCoreMap{
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
};

// Cortex-A57 supports 4 memory watchpoints
constexpr u64 NUM_WATCHPOINTS = 4;

} // namespace Hardware

} // namespace Core
@@ -48,7 +48,7 @@ EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type) {
        return handheld.get();
    case NpadIdType::Invalid:
    default:
        UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
        ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
        return nullptr;
    }
}
@@ -77,7 +77,7 @@ const EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type
        return handheld.get();
    case NpadIdType::Invalid:
    default:
        UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
        ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
        return nullptr;
    }
}
@@ -19,7 +19,7 @@

namespace IPC {

constexpr ResultCode ERR_REMOTE_PROCESS_DEAD{ErrorModule::HIPC, 301};
constexpr Result ERR_REMOTE_PROCESS_DEAD{ErrorModule::HIPC, 301};

class RequestHelperBase {
protected:
@@ -176,7 +176,7 @@ public:
    void PushImpl(float value);
    void PushImpl(double value);
    void PushImpl(bool value);
    void PushImpl(ResultCode value);
    void PushImpl(Result value);

    template <typename T>
    void Push(T value) {
@@ -251,7 +251,7 @@ void ResponseBuilder::PushRaw(const T& value) {
    index += (sizeof(T) + 3) / 4; // round up to word length
}

inline void ResponseBuilder::PushImpl(ResultCode value) {
inline void ResponseBuilder::PushImpl(Result value) {
    // Result codes are actually 64-bit in the IPC buffer; the high word is unused, so push zero.
    Push(value.raw);
    Push<u32>(0);
@@ -481,8 +481,8 @@ inline bool RequestParser::Pop() {
}

template <>
inline ResultCode RequestParser::Pop() {
    return ResultCode{Pop<u32>()};
inline Result RequestParser::Pop() {
    return Result{Pop<u32>()};
}

template <typename T>
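As the comment in the hunk above notes, a result occupies a 64-bit slot in the command buffer even though only the low word carries data, which is why `PushImpl` writes the raw value and then a zero. A sketch of typical HLE handler usage of these helpers, inferred from the interfaces shown here (the handler name is hypothetical):

```cpp
// Hypothetical service handler; ctx is supplied by the HLE dispatcher.
void IllustrativeRequest(Kernel::HLERequestContext& ctx) {
    IPC::ResponseBuilder rb{ctx, 2}; // two words: the 64-bit result slot
    rb.Push(ResultSuccess);          // low word = raw code, high word = 0
}
```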
@@ -188,8 +188,8 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
    rp.Skip(1, false); // The command is actually a u64, but we don't use the high part.
}

ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
                                                                u32_le* src_cmdbuf) {
Result HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
                                                            u32_le* src_cmdbuf) {
    ParseCommandBuffer(handle_table, src_cmdbuf, true);

    if (command_header->IsCloseCommand()) {
@@ -202,7 +202,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTab
    return ResultSuccess;
}

ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
    auto current_offset = handles_offset;
    auto& owner_process = *requesting_thread.GetOwnerProcess();
    auto& handle_table = owner_process.GetHandleTable();
@@ -18,7 +18,7 @@
#include "core/hle/ipc.h"
#include "core/hle/kernel/svc_common.h"

union ResultCode;
union Result;

namespace Core::Memory {
class Memory;
@@ -71,10 +71,10 @@ public:
     * it should be used to differentiate which client (As in ClientSession) we're answering to.
     * TODO(Subv): Use a wrapper structure to hold all the information relevant to
     * this request (ServerSession, Originator thread, Translated command buffer, etc).
     * @returns ResultCode the result code of the translate operation.
     * @returns Result the result code of the translate operation.
     */
    virtual ResultCode HandleSyncRequest(Kernel::KServerSession& session,
                                         Kernel::HLERequestContext& context) = 0;
    virtual Result HandleSyncRequest(Kernel::KServerSession& session,
                                     Kernel::HLERequestContext& context) = 0;

    /**
     * Signals that a client has just connected to this HLE handler and keeps the
@@ -141,7 +141,7 @@ public:
        if (index < DomainHandlerCount()) {
            domain_handlers[index] = nullptr;
        } else {
            UNREACHABLE_MSG("Unexpected handler index {}", index);
            ASSERT_MSG(false, "Unexpected handler index {}", index);
        }
    }

@@ -212,11 +212,10 @@ public:
    }

    /// Populates this context with data from the requesting process/thread.
    ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
                                                 u32_le* src_cmdbuf);
    Result PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf);

    /// Writes data from this context back to the requesting process/thread.
    ResultCode WriteToOutgoingCommandBuffer(KThread& requesting_thread);
    Result WriteToOutgoingCommandBuffer(KThread& requesting_thread);

    u32_le GetHipcCommand() const {
        return command;
@@ -244,7 +244,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
            FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
        // If we somehow get an invalid type, abort.
        default:
            UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
            ASSERT_MSG(false, "Unknown slab type: {}", slab_types[i]);
        }

        // If we've hit the end of a gap, free it.
@@ -90,8 +90,7 @@ public:
    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
        : KThreadQueue(kernel_), m_tree(t) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // If the thread is waiting on an address arbiter, remove it from the tree.
        if (waiting_thread->IsWaitingForAddressArbiter()) {
            m_tree->erase(m_tree->iterator_to(*waiting_thread));
@@ -108,7 +107,7 @@ private:

} // namespace

ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
Result KAddressArbiter::Signal(VAddr addr, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {
@@ -131,7 +130,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
    return ResultSuccess;
}

ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {
@@ -164,7 +163,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
    return ResultSuccess;
}

ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
    // Perform signaling.
    s32 num_waiters{};
    {
@@ -232,9 +231,9 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
    return ResultSuccess;
}

ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

    {
@@ -285,9 +284,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
    return cur_thread->GetWaitResult();
}

ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

    {
@@ -8,7 +8,7 @@
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/svc_types.h"

union ResultCode;
union Result;

namespace Core {
class System;
@@ -25,8 +25,7 @@ public:
    explicit KAddressArbiter(Core::System& system_);
    ~KAddressArbiter();

    [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
                                             s32 count) {
    [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
        switch (type) {
        case Svc::SignalType::Signal:
            return Signal(addr, count);
@@ -35,12 +34,12 @@ public:
        case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
            return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
        }
        UNREACHABLE();
        ASSERT(false);
        return ResultUnknown;
    }

    [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
                                            s64 timeout) {
    [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
                                        s64 timeout) {
        switch (type) {
        case Svc::ArbitrationType::WaitIfLessThan:
            return WaitIfLessThan(addr, value, false, timeout);
@@ -49,16 +48,16 @@ public:
        case Svc::ArbitrationType::WaitIfEqual:
            return WaitIfEqual(addr, value, timeout);
        }
        UNREACHABLE();
        ASSERT(false);
        return ResultUnknown;
    }

private:
    [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
    [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
    [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
    [[nodiscard]] Result Signal(VAddr addr, s32 count);
    [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
    [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);

    ThreadTree thread_tree;
@@ -84,7 +84,7 @@ u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
    }
    UNREACHABLE();
    ASSERT(false);
    return 0;
}

@@ -101,7 +101,7 @@ std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
        ASSERT(IsAllowed39BitType(type));
        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
    }
    UNREACHABLE();
    ASSERT(false);
    return 0;
}
@@ -18,7 +18,7 @@ namespace Kernel {
class KernelCore;
class KProcess;

#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
    \
private: \
    friend class ::Kernel::KClassTokenGenerator; \
@@ -40,16 +40,19 @@ public:
    static constexpr const char* GetStaticTypeName() { \
        return TypeName; \
    } \
    virtual TypeObj GetTypeObj() const { \
    virtual TypeObj GetTypeObj() ATTRIBUTE { \
        return GetStaticTypeObj(); \
    } \
    virtual const char* GetTypeName() const { \
    virtual const char* GetTypeName() ATTRIBUTE { \
        return GetStaticTypeName(); \
    } \
    \
private: \
    constexpr bool operator!=(const TypeObj& rhs)

#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
    KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)

class KAutoObject {
protected:
    class TypeObj {
@@ -82,7 +85,7 @@ protected:
    };

private:
    KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
    KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);

public:
    explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
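The new `ATTRIBUTE` parameter exists because the root of the hierarchy has nothing to override: `KAutoObject` must declare these virtuals with plain `const`, while every derived class wants `const override`, and a single macro body cannot emit both. A reduced illustration of the same technique (the names here are invented for the example):

```cpp
#define TRAITS_IMPL(CLASS, ATTRIBUTE)                                          \
    virtual const char* GetTypeName() ATTRIBUTE {                              \
        return #CLASS;                                                         \
    }

// Derived classes get the override-checked variant.
#define TRAITS(CLASS) TRAITS_IMPL(CLASS, const override)

class Base {
public:
    TRAITS_IMPL(Base, const) // root class: nothing to override yet
};

class Derived : public Base {
public:
    TRAITS(Derived) // const override, verified by the compiler
};
```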
@@ -49,6 +49,7 @@ private:
                }
            }
        }
        UNREACHABLE();
    }();

    template <typename T>
@@ -59,8 +59,8 @@ bool KClientPort::IsSignaled() const {
    return num_sessions < max_sessions;
}

ResultCode KClientPort::CreateSession(KClientSession** out,
                                      std::shared_ptr<SessionRequestManager> session_manager) {
Result KClientPort::CreateSession(KClientSession** out,
                                  std::shared_ptr<SessionRequestManager> session_manager) {
    // Reserve a new session from the resource limit.
    KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
                                                   LimitableResource::Sessions);

@@ -53,8 +53,8 @@ public:
    void Destroy() override;
    bool IsSignaled() const override;

    ResultCode CreateSession(KClientSession** out,
                             std::shared_ptr<SessionRequestManager> session_manager = nullptr);
    Result CreateSession(KClientSession** out,
                         std::shared_ptr<SessionRequestManager> session_manager = nullptr);

private:
    std::atomic<s32> num_sessions{};
@@ -21,8 +21,8 @@ void KClientSession::Destroy() {

void KClientSession::OnServerClosed() {}

ResultCode KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                           Core::Timing::CoreTiming& core_timing) {
Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                       Core::Timing::CoreTiming& core_timing) {
    // Signal the server session that new data is available
    return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing);
}

@@ -9,7 +9,7 @@
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"

union ResultCode;
union Result;

namespace Core::Memory {
class Memory;
@@ -46,8 +46,8 @@ public:
        return parent;
    }

    ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                               Core::Timing::CoreTiming& core_timing);
    Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                           Core::Timing::CoreTiming& core_timing);

    void OnServerClosed();
@@ -7,7 +7,7 @@
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -19,7 +19,7 @@ namespace Kernel {
KCodeMemory::KCodeMemory(KernelCore& kernel_)
    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}

ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
    // Set members.
    m_owner = kernel.CurrentProcess();

@@ -27,23 +27,18 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
    auto& page_table = m_owner->PageTable();

    // Construct the page group.
    m_page_group =
        KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
    m_page_group = {};

    // Lock the memory.
    R_TRY(page_table.LockForCodeMemory(addr, size))
    R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))

    // Clear the memory.
    //
    // FIXME: this ends up clobbering address ranges outside the scope of the mapping within
    // guest memory, and is not specifically required if the guest program is correctly
    // written, so disable until this is further investigated.
    //
    // for (const auto& block : m_page_group.Nodes()) {
    //     std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
    // }
    for (const auto& block : m_page_group.Nodes()) {
        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
    }

    // Set remaining tracking members.
    m_owner->Open();
    m_address = addr;
    m_is_initialized = true;
    m_is_owner_mapped = false;
@@ -57,11 +52,17 @@ void KCodeMemory::Finalize() {
    // Unlock.
    if (!m_is_mapped && !m_is_owner_mapped) {
        const size_t size = m_page_group.GetNumPages() * PageSize;
        m_owner->PageTable().UnlockForCodeMemory(m_address, size);
        m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
    }

    // Close the page group.
    m_page_group = {};

    // Close our reference to our owner.
    m_owner->Close();
}
ResultCode KCodeMemory::Map(VAddr address, size_t size) {
Result KCodeMemory::Map(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -81,7 +82,7 @@ ResultCode KCodeMemory::Map(VAddr address, size_t size) {
    return ResultSuccess;
}

ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
Result KCodeMemory::Unmap(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -98,7 +99,7 @@ ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
    return ResultSuccess;
}

ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -118,7 +119,8 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
        k_perm = KMemoryPermission::UserReadExecute;
        break;
    default:
        break;
        // Already validated by ControlCodeMemory svc
        UNREACHABLE();
    }

    // Map the memory.
@@ -131,7 +133,7 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
    return ResultSuccess;
}

ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
    // Validate the size.
    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -7,7 +7,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
@@ -29,13 +29,13 @@ class KCodeMemory final
public:
    explicit KCodeMemory(KernelCore& kernel_);

    ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
    Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
    void Finalize();

    ResultCode Map(VAddr address, size_t size);
    ResultCode Unmap(VAddr address, size_t size);
    ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
    ResultCode UnmapFromOwner(VAddr address, size_t size);
    Result Map(VAddr address, size_t size);
    Result Unmap(VAddr address, size_t size);
    Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
    Result UnmapFromOwner(VAddr address, size_t size);

    bool IsInitialized() const {
        return m_is_initialized;
@@ -53,7 +53,7 @@ public:
    }

private:
    KPageLinkedList m_page_group{};
    KPageGroup m_page_group{};
    KProcess* m_owner{};
    VAddr m_address{};
    KLightLock m_lock;
@@ -61,8 +61,7 @@ public:
    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
        : KThreadQueue(kernel_) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // Remove the thread as a waiter from its owner.
        waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);

@@ -80,8 +79,7 @@ public:
        KernelCore& kernel_, KConditionVariable::ThreadTree* t)
        : KThreadQueue(kernel_), m_tree(t) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // Remove the thread as a waiter from its owner.
        if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(waiting_thread);
@@ -105,8 +103,8 @@ KConditionVariable::KConditionVariable(Core::System& system_)

KConditionVariable::~KConditionVariable() = default;

ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
Result KConditionVariable::SignalToAddress(VAddr addr) {
    KThread* owner_thread = GetCurrentThreadPointer(kernel);

    // Signal the address.
    {
@@ -126,7 +124,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    }

    // Write the value to userspace.
    ResultCode result{ResultSuccess};
    Result result{ResultSuccess};
    if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
        result = ResultSuccess;
    } else {
@@ -146,8 +144,8 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    }
}

ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);

    // Wait for the address.
@@ -261,7 +259,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
    }
}

ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
    // Prepare to wait.
    KThread* cur_thread = GetCurrentThreadPointer(kernel);
    ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(

@@ -25,12 +25,12 @@ public:
    ~KConditionVariable();

    // Arbitration
    [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
    [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
    [[nodiscard]] Result SignalToAddress(VAddr addr);
    [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);

    // Condition variable
    void Signal(u64 cv_key, s32 count);
    [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
    [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
    void SignalImpl(KThread* thread);
@@ -8,7 +8,7 @@ namespace Kernel {
KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;

ResultCode KHandleTable::Finalize() {
Result KHandleTable::Finalize() {
    // Get the table and clear our record of it.
    u16 saved_table_size = 0;
    {
@@ -62,7 +62,7 @@ bool KHandleTable::Remove(Handle handle) {
    return true;
}

ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);

@@ -85,7 +85,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
    return ResultSuccess;
}

ResultCode KHandleTable::Reserve(Handle* out_handle) {
Result KHandleTable::Reserve(Handle* out_handle) {
    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);

@@ -30,7 +30,7 @@ public:
    explicit KHandleTable(KernelCore& kernel_);
    ~KHandleTable();

    ResultCode Initialize(s32 size) {
    Result Initialize(s32 size) {
        R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);

        // Initialize all fields.
@@ -60,7 +60,7 @@ public:
        return m_max_count;
    }

    ResultCode Finalize();
    Result Finalize();
    bool Remove(Handle handle);

    template <typename T = KAutoObject>
@@ -100,10 +100,10 @@ public:
        return this->template GetObjectWithoutPseudoHandle<T>(handle);
    }

    ResultCode Reserve(Handle* out_handle);
    Result Reserve(Handle* out_handle);
    void Unreserve(Handle handle);

    ResultCode Add(Handle* out_handle, KAutoObject* obj);
    Result Add(Handle* out_handle, KAutoObject* obj);
    void Register(Handle handle, KAutoObject* obj);

    template <typename T>
@@ -15,8 +15,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
        return;
    }

    auto& scheduler = kernel.Scheduler(core_id);
    auto& current_thread = *scheduler.GetCurrentThread();
    auto& current_thread = GetCurrentThread(kernel);

    // If the user disable count is set, we may need to pin the current thread.
    if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
@@ -26,7 +25,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
        process->PinCurrentThread(core_id);

        // Set the interrupt flag for the thread.
        scheduler.GetCurrentThread()->SetInterruptFlag();
        GetCurrentThread(kernel).SetInterruptFlag();
    }
}

@@ -17,8 +17,7 @@ public:
                                             bool term)
        : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // Only process waits if we're allowed to.
        if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
            return;

@@ -15,8 +15,7 @@ class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
    explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}

    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
                    bool cancel_timer_task) override {
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
        // Remove the thread as a waiter from its owner.
        if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(waiting_thread);
@@ -11,7 +11,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"

@@ -29,7 +29,7 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
    } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
        return KMemoryManager::Pool::SystemNonSecure;
    } else {
        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
        ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
        return {};
    }
}
@@ -208,8 +208,8 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
    return allocated_block;
}

ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
                                                 Direction dir, bool random) {
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
                                             Direction dir, bool random) {
    // Choose a heap based on our page size request.
    const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
    R_UNLESS(0 <= heap_index, ResultOutOfMemory);
@@ -257,7 +257,7 @@ ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t nu
    return ResultSuccess;
}

ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) {
Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
    ASSERT(out != nullptr);
    ASSERT(out->GetNumPages() == 0);

@@ -293,8 +293,8 @@ ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_page
    return ResultSuccess;
}

ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages,
                                                     u32 option, u64 process_id, u8 fill_pattern) {
Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
                                                 u64 process_id, u8 fill_pattern) {
    ASSERT(out != nullptr);
    ASSERT(out->GetNumPages() == 0);

@@ -370,12 +370,12 @@ void KMemoryManager::Close(PAddr address, size_t num_pages) {
    }
}

void KMemoryManager::Close(const KPageLinkedList& pg) {
void KMemoryManager::Close(const KPageGroup& pg) {
    for (const auto& node : pg.Nodes()) {
        Close(node.GetAddress(), node.GetNumPages());
    }
}
void KMemoryManager::Open(const KPageLinkedList& pg) {
void KMemoryManager::Open(const KPageGroup& pg) {
    for (const auto& node : pg.Nodes()) {
        Open(node.GetAddress(), node.GetNumPages());
    }
@@ -19,7 +19,7 @@ class System;

namespace Kernel {

class KPageLinkedList;
class KPageGroup;

class KMemoryManager final {
public:
@@ -65,17 +65,17 @@ public:
    }

    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
    ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option);
    ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option,
                                         u64 process_id, u8 fill_pattern);
    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
    Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
                                     u8 fill_pattern);

    static constexpr size_t MaxManagerCount = 10;

    void Close(PAddr address, size_t num_pages);
    void Close(const KPageLinkedList& pg);
    void Close(const KPageGroup& pg);

    void Open(PAddr address, size_t num_pages);
    void Open(const KPageLinkedList& pg);
    void Open(const KPageGroup& pg);

public:
    static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -262,8 +262,8 @@ private:
        }
    }

    ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool,
                                     Direction dir, bool random);
    Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
                                 bool random);

private:
    Core::System& system;
@@ -12,7 +12,7 @@

namespace Kernel {

class KPageLinkedList final {
class KPageGroup final {
public:
    class Node final {
    public:
@@ -36,8 +36,8 @@ public:
    };

public:
    KPageLinkedList() = default;
    KPageLinkedList(u64 address, u64 num_pages) {
    KPageGroup() = default;
    KPageGroup(u64 address, u64 num_pages) {
        ASSERT(AddBlock(address, num_pages).IsSuccess());
    }

@@ -57,7 +57,7 @@ public:
        return num_pages;
    }

    bool IsEqual(KPageLinkedList& other) const {
    bool IsEqual(KPageGroup& other) const {
        auto this_node = nodes.begin();
        auto other_node = other.nodes.begin();
        while (this_node != nodes.end() && other_node != other.nodes.end()) {
@@ -72,7 +72,7 @@ public:
        return this_node == nodes.end() && other_node == other.nodes.end();
    }

    ResultCode AddBlock(u64 address, u64 num_pages) {
    Result AddBlock(u64 address, u64 num_pages) {
        if (!num_pages) {
            return ResultSuccess;
        }
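Past the rename, a page group is conceptually a list of physically contiguous page runs, and `AddBlock` can merge a block that directly extends the previous run. A minimal sketch of that idea; the coalescing rule and the `PageSize` value here are assumptions for illustration, not the kernel's exact code:

```cpp
#include <cstdint>
#include <list>

constexpr std::uint64_t PageSize = 0x1000;

struct Block {
    std::uint64_t address;
    std::uint64_t num_pages;
};

class PageGroup {
public:
    void AddBlock(std::uint64_t address, std::uint64_t num_pages) {
        if (num_pages == 0) {
            return;
        }
        // Merge with the previous block when physically contiguous.
        if (!blocks.empty()) {
            auto& last = blocks.back();
            if (last.address + last.num_pages * PageSize == address) {
                last.num_pages += num_pages;
                return;
            }
        }
        blocks.push_back({address, num_pages});
    }

private:
    std::list<Block> blocks;
};
```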
@@ -9,7 +9,7 @@
#include "core/hle/kernel/k_address_space_info.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -35,7 +35,7 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
    case FileSys::ProgramAddressSpaceType::Is39Bit:
        return 39;
    default:
        UNREACHABLE();
        ASSERT(false);
        return {};
    }
}
@@ -47,9 +47,9 @@ KPageTable::KPageTable(Core::System& system_)

KPageTable::~KPageTable() = default;

ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
                                            bool enable_aslr, VAddr code_addr,
                                            std::size_t code_size, KMemoryManager::Pool pool) {
Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                        VAddr code_addr, std::size_t code_size,
                                        KMemoryManager::Pool pool) {

    const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
        return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
@@ -65,7 +65,6 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
    std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
    std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};

    ASSERT(start <= code_addr);
    ASSERT(code_addr < code_addr + code_size);
    ASSERT(code_addr + code_size - 1 <= end - 1);

@@ -128,7 +127,7 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
    const std::size_t needed_size{
        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
    if (alloc_size < needed_size) {
        UNREACHABLE();
        ASSERT(false);
        return ResultOutOfMemory;
    }

@@ -258,8 +257,8 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
    return InitializeMemoryLayout(start, end);
}

ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
                                      KMemoryPermission perm) {
Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
                                  KMemoryPermission perm) {
    const u64 size{num_pages * PageSize};

    // Validate the mapping request.

@@ -272,7 +271,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
    R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
                                 KMemoryPermission::None, KMemoryPermission::None,
                                 KMemoryAttribute::None, KMemoryAttribute::None));
    KPageLinkedList pg;
    KPageGroup pg;
    R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
        &pg, num_pages,
        KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
@@ -284,7 +283,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
    return ResultSuccess;
}

ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
    // Validate the mapping request.
    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
             ResultInvalidMemoryRegion);
@@ -314,7 +313,7 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
    const std::size_t num_pages = size / PageSize;

    // Create page groups for the memory being mapped.
    KPageLinkedList pg;
    KPageGroup pg;
    AddRegionToPages(src_address, num_pages, pg);

    // Reprotect the source as kernel-read/not mapped.

@@ -345,8 +344,8 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::
    return ResultSuccess;
}

ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
                                       ICacheInvalidationStrategy icache_invalidation_strategy) {
Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
                                   ICacheInvalidationStrategy icache_invalidation_strategy) {
    // Validate the mapping request.
    R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
             ResultInvalidMemoryRegion);
@@ -490,7 +489,7 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
    return address;
}

ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
    ASSERT(this->IsLockedByCurrentThread());

    const size_t size = num_pages * PageSize;
@@ -542,8 +541,97 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
    return ResultSuccess;
}

ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                          KPageTable& src_page_table, VAddr src_addr) {
bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
    ASSERT(this->IsLockedByCurrentThread());

    const size_t size = num_pages * PageSize;
    const auto& pg = pg_ll.Nodes();
    const auto& memory_layout = system.Kernel().MemoryLayout();

    // Empty groups are necessarily invalid.
    if (pg.empty()) {
        return false;
    }

    // We're going to validate that the group we'd expect is the group we see.
    auto cur_it = pg.begin();
    PAddr cur_block_address = cur_it->GetAddress();
    size_t cur_block_pages = cur_it->GetNumPages();

    auto UpdateCurrentIterator = [&]() {
        if (cur_block_pages == 0) {
            if ((++cur_it) == pg.end()) {
                return false;
            }

            cur_block_address = cur_it->GetAddress();
            cur_block_pages = cur_it->GetNumPages();
        }
        return true;
    };

    // Begin traversal.
    Common::PageTable::TraversalContext context;
    Common::PageTable::TraversalEntry next_entry;
    if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
        return false;
    }

    // Prepare tracking variables.
    PAddr cur_addr = next_entry.phys_addr;
    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
    size_t tot_size = cur_size;

    // Iterate, comparing expected to actual.
    while (tot_size < size) {
        if (!page_table_impl.ContinueTraversal(next_entry, context)) {
            return false;
        }

        if (next_entry.phys_addr != (cur_addr + cur_size)) {
            const size_t cur_pages = cur_size / PageSize;

            if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
                return false;
            }

            if (!UpdateCurrentIterator()) {
                return false;
            }

            if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
                return false;
            }

            cur_block_address += cur_size;
            cur_block_pages -= cur_pages;
            cur_addr = next_entry.phys_addr;
            cur_size = next_entry.block_size;
        } else {
            cur_size += next_entry.block_size;
        }

        tot_size += next_entry.block_size;
    }

    // Ensure we compare the right amount for the last block.
    if (tot_size > size) {
        cur_size -= (tot_size - size);
    }

    if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
        return false;
    }

    if (!UpdateCurrentIterator()) {
        return false;
    }

    return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}

Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
                                      VAddr src_addr) {
    KScopedLightLock lk(general_lock);

    const std::size_t num_pages{size / PageSize};
@@ -572,7 +660,7 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
    return ResultSuccess;
}

ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
    // Lock the physical memory lock.
    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

@@ -633,7 +721,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

            // Allocate pages for the new memory.
            KPageLinkedList pg;
            KPageGroup pg;
            R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
                &pg, (size - mapped_size) / PageSize,
                KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
@@ -815,7 +903,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
    }
}

ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
    // Lock the physical memory lock.
    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

@@ -884,7 +972,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
    }

    // Make a page group for the unmap region.
    KPageLinkedList pg;
    KPageGroup pg;
    {
        auto& impl = this->PageTableImpl();

@@ -1046,7 +1134,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
    return ResultSuccess;
}

ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
    KScopedLightLock lk(general_lock);

    KMemoryState src_state{};
@@ -1059,7 +1147,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
        return ResultInvalidCurrentMemory;
    }

    KPageLinkedList page_linked_list;
    KPageGroup page_linked_list;
    const std::size_t num_pages{size / PageSize};

    AddRegionToPages(src_addr, num_pages, page_linked_list);

@@ -1085,7 +1173,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
    return ResultSuccess;
}

ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
    KScopedLightLock lk(general_lock);

    KMemoryState src_state{};
@@ -1100,8 +1188,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
                                 KMemoryPermission::None, KMemoryAttribute::Mask,
                                 KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));

    KPageLinkedList src_pages;
    KPageLinkedList dst_pages;
    KPageGroup src_pages;
    KPageGroup dst_pages;
    const std::size_t num_pages{size / PageSize};

    AddRegionToPages(src_addr, num_pages, src_pages);

@@ -1127,8 +1215,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
    return ResultSuccess;
}

ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
                                KMemoryPermission perm) {
Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
                            KMemoryPermission perm) {
    ASSERT(this->IsLockedByCurrentThread());

    VAddr cur_addr{addr};
@@ -1151,8 +1239,8 @@ ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_l
    return ResultSuccess;
}

ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list,
                                KMemoryState state, KMemoryPermission perm) {
Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
                            KMemoryPermission perm) {
    // Check that the map is in range.
    const std::size_t num_pages{page_linked_list.GetNumPages()};
    const std::size_t size{num_pages * PageSize};
@@ -1175,10 +1263,10 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
    return ResultSuccess;
}

ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
                                PAddr phys_addr, bool is_pa_valid, VAddr region_start,
                                std::size_t region_num_pages, KMemoryState state,
                                KMemoryPermission perm) {
Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
                            PAddr phys_addr, bool is_pa_valid, VAddr region_start,
                            std::size_t region_num_pages, KMemoryState state,
                            KMemoryPermission perm) {
    ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);

    // Ensure this is a valid map request.
@@ -1215,7 +1303,7 @@ ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::siz
    return ResultSuccess;
}

ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
    ASSERT(this->IsLockedByCurrentThread());

    VAddr cur_addr{addr};
@@ -1233,8 +1321,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
    return ResultSuccess;
}

ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
                                  KMemoryState state) {
Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
    // Check that the unmap is in range.
    const std::size_t num_pages{page_linked_list.GetNumPages()};
    const std::size_t size{num_pages * PageSize};
@@ -1257,7 +1344,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
    return ResultSuccess;
}

ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
    // Check that the unmap is in range.
    const std::size_t size = num_pages * PageSize;
    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1281,10 +1368,10 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
    return ResultSuccess;
}

ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
                                            KMemoryState state_mask, KMemoryState state,
                                            KMemoryPermission perm_mask, KMemoryPermission perm,
                                            KMemoryAttribute attr_mask, KMemoryAttribute attr) {
Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
                                        KMemoryState state_mask, KMemoryState state,
                                        KMemoryPermission perm_mask, KMemoryPermission perm,
                                        KMemoryAttribute attr_mask, KMemoryAttribute attr) {
    // Ensure that the page group isn't null.
    ASSERT(out != nullptr);

@@ -1306,8 +1393,8 @@ ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address,
    return ResultSuccess;
}

ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                                  Svc::MemoryPermission svc_perm) {
Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                              Svc::MemoryPermission svc_perm) {
    const size_t num_pages = size / PageSize;

    // Lock the table.

@@ -1341,7 +1428,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
            new_state = KMemoryState::AliasCodeData;
            break;
        default:
            UNREACHABLE();
            ASSERT(false);
        }
    }

@@ -1379,7 +1466,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
    return QueryInfoImpl(addr);
}

ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
    KScopedLightLock lk(general_lock);

    KMemoryState state{};
@@ -1397,7 +1484,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
    return ResultSuccess;
}

ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
    KScopedLightLock lk(general_lock);

    KMemoryState state{};
@@ -1412,8 +1499,8 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
    return ResultSuccess;
}

ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
                                           Svc::MemoryPermission svc_perm) {
Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
                                       Svc::MemoryPermission svc_perm) {
    const size_t num_pages = size / PageSize;

    // Lock the table.

@@ -1440,7 +1527,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
    return ResultSuccess;
}

ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
    const size_t num_pages = size / PageSize;
    ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
           KMemoryAttribute::SetMask);

@@ -1475,7 +1562,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
    return ResultSuccess;
}

ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
Result KPageTable::SetMaxHeapSize(std::size_t size) {
    // Lock the table.
    KScopedLightLock lk(general_lock);

@@ -1487,7 +1574,7 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
    return ResultSuccess;
}

ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
    // Lock the physical memory mutex.
    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);

@@ -1554,7 +1641,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
|
||||
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
|
||||
|
||||
// Allocate pages for the heap extension.
|
||||
KPageLinkedList pg;
|
||||
KPageGroup pg;
|
||||
R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
|
||||
&pg, allocation_size / PageSize,
|
||||
KMemoryManager::EncodeOption(memory_pool, allocation_option)));
|
||||
@@ -1629,7 +1716,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
|
||||
if (is_map_only) {
|
||||
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
|
||||
} else {
|
||||
KPageLinkedList page_group;
|
||||
KPageGroup page_group;
|
||||
R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
|
||||
&page_group, needed_num_pages,
|
||||
KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
|
||||
@@ -1641,11 +1728,11 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
|
||||
return addr;
|
||||
}
|
||||
|
||||
ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
|
||||
Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
|
||||
KScopedLightLock lk(general_lock);
|
||||
|
||||
KMemoryPermission perm{};
|
||||
if (const ResultCode result{CheckMemoryState(
|
||||
if (const Result result{CheckMemoryState(
|
||||
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
|
||||
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
|
||||
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
|
||||
@@ -1664,11 +1751,11 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
|
||||
Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
|
||||
KScopedLightLock lk(general_lock);
|
||||
|
||||
KMemoryPermission perm{};
|
||||
if (const ResultCode result{CheckMemoryState(
|
||||
if (const Result result{CheckMemoryState(
|
||||
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
|
||||
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
|
||||
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
|
||||
@@ -1687,25 +1774,24 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
|
||||
Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
|
||||
return this->LockMemoryAndOpen(
|
||||
nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
|
||||
KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
|
||||
KMemoryAttribute::All, KMemoryAttribute::None,
|
||||
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
|
||||
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
|
||||
KMemoryAttribute::None,
|
||||
static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
|
||||
KMemoryPermission::KernelReadWrite),
|
||||
KMemoryAttribute::Locked);
|
||||
}
|
||||
|
||||
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
|
||||
return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
|
||||
KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
|
||||
KMemoryPermission::None, KMemoryAttribute::All,
|
||||
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
|
||||
KMemoryAttribute::Locked, nullptr);
|
||||
Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
|
||||
return this->UnlockMemory(
|
||||
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
|
||||
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
|
||||
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
|
||||
}
|
||||
|
||||
ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
|
||||
Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
|
||||
block_manager = std::make_unique<KMemoryBlockManager>(start, end);
|
||||
|
||||
return ResultSuccess;
|
||||
@@ -1730,13 +1816,11 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
|
||||
}
|
||||
|
||||
void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
|
||||
KPageLinkedList& page_linked_list) {
|
||||
KPageGroup& page_linked_list) {
|
||||
VAddr addr{start};
|
||||
while (addr < start + (num_pages * PageSize)) {
|
||||
const PAddr paddr{GetPhysicalAddr(addr)};
|
||||
if (!paddr) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
ASSERT(paddr != 0);
|
||||
page_linked_list.AddBlock(paddr, 1);
|
||||
addr += PageSize;
|
||||
}
|
||||
@@ -1751,8 +1835,8 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_page
|
||||
IsKernel() ? 1 : 4);
|
||||
}
|
||||
|
||||
ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
|
||||
OperationType operation) {
|
||||
Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
|
||||
OperationType operation) {
|
||||
ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
ASSERT(Common::IsAligned(addr, PageSize));
|
||||
@@ -1767,7 +1851,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
|
||||
system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
|
||||
addr += size;
|
||||
@@ -1776,8 +1860,8 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
|
||||
OperationType operation, PAddr map_addr) {
|
||||
Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
|
||||
OperationType operation, PAddr map_addr) {
|
||||
ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
ASSERT(num_pages > 0);
|
||||
@@ -1798,7 +1882,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss
|
||||
case OperationType::ChangePermissionsAndRefresh:
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
return ResultSuccess;
|
||||
}
|
||||
@@ -1835,7 +1919,6 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
|
||||
return code_region_start;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1871,7 +1954,6 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
|
||||
return code_region_end - code_region_start;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1921,10 +2003,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
|
||||
}
|
||||
}
|
||||
|
||||
ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr) const {
|
||||
Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr) const {
|
||||
// Validate the states match expectation.
|
||||
R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory);
|
||||
R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory);
|
||||
@@ -1933,12 +2015,11 @@ ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState st
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
|
||||
std::size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr) const {
|
||||
Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
|
||||
std::size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr) const {
|
||||
ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
// Get information about the first block.
|
||||
@@ -1976,12 +2057,12 @@ ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
|
||||
KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
|
||||
VAddr addr, std::size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
|
||||
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
|
||||
KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
|
||||
VAddr addr, std::size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
|
||||
ASSERT(this->IsLockedByCurrentThread());
|
||||
|
||||
// Get information about the first block.
|
||||
@@ -2038,11 +2119,11 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
|
||||
size_t size, KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||
KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
|
||||
Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
|
||||
KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||
KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
|
||||
// Validate basic preconditions.
|
||||
ASSERT((lock_attr & attr) == KMemoryAttribute::None);
|
||||
ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
|
||||
@@ -2096,11 +2177,11 @@ ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_pad
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr, KMemoryPermission new_perm,
|
||||
KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
|
||||
Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr, KMemoryPermission new_perm,
|
||||
KMemoryAttribute lock_attr, const KPageGroup* pg) {
|
||||
// Validate basic preconditions.
|
||||
ASSERT((attr_mask & lock_attr) == lock_attr);
|
||||
ASSERT((attr & lock_attr) == lock_attr);
|
||||
@@ -2125,7 +2206,7 @@ ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_
|
||||
|
||||
// Check the page group.
|
||||
if (pg != nullptr) {
|
||||
UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
|
||||
R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
|
||||
}
|
||||
|
||||
// Decide on new perm and attr.
|
||||
|
||||
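The hunks above are dominated by two mechanical renames (the `ResultCode` return type becomes `Result`, and `KPageLinkedList` becomes `KPageGroup`), while the control flow leans on the `R_UNLESS`/`R_TRY` early-return macros visible throughout. A minimal, self-contained sketch of how that macro pattern behaves; this is a simplification, since yuzu's real `Result` is a richer union with module/description fields, and the error value below is a placeholder:

```cpp
#include <cstdint>
#include <cstdio>

// Simplified stand-in for yuzu's Result: success is raw value 0.
struct Result {
    uint32_t raw;
    constexpr bool IsSuccess() const { return raw == 0; }
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalidCurrentMemory{0x1234}; // placeholder value

// R_UNLESS: bail out with `res` unless `expr` holds.
#define R_UNLESS(expr, res)                                                    \
    do {                                                                       \
        if (!(expr)) {                                                         \
            return res;                                                        \
        }                                                                      \
    } while (0)

// R_TRY: propagate any error produced by a Result-returning expression.
#define R_TRY(res_expr)                                                        \
    do {                                                                       \
        if (const Result tmp_result_ = (res_expr); tmp_result_.IsError()) {    \
            return tmp_result_;                                                \
        }                                                                      \
    } while (0)

Result MapOnePage(bool in_range) {
    // Mirrors the shape of the KPageTable methods above.
    R_UNLESS(in_range, ResultInvalidCurrentMemory);
    return ResultSuccess;
}

Result MapTwoPages() {
    R_TRY(MapOnePage(true));
    R_TRY(MapOnePage(true));
    return ResultSuccess;
}

int main() {
    std::printf("%s\n", MapTwoPages().IsSuccess() ? "mapped" : "failed");
}
```

`R_UNLESS` turns a precondition into an early error return and `R_TRY` propagates the first failing sub-operation, which is what keeps functions like `MapPages` and `SetHeapSize` above linear instead of deeply nested.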
src/core/hle/kernel/k_page_table.h
@@ -33,51 +33,49 @@ public:
     explicit KPageTable(Core::System& system_);
     ~KPageTable();
 
-    ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                    VAddr code_addr, std::size_t code_size,
-                                    KMemoryManager::Pool pool);
-    ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
-                              KMemoryPermission perm);
-    ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
-    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
-                               ICacheInvalidationStrategy icache_invalidation_strategy);
-    ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
-                                  VAddr src_addr);
-    ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
-    ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
-    ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
-    ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
-                        KMemoryPermission perm);
-    ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
-                        PAddr phys_addr, KMemoryState state, KMemoryPermission perm) {
+    Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+                                VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
+    Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
+                          KMemoryPermission perm);
+    Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+                           ICacheInvalidationStrategy icache_invalidation_strategy);
+    Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+                              VAddr src_addr);
+    Result MapPhysicalMemory(VAddr addr, std::size_t size);
+    Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
+    Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
+                    KMemoryPermission perm);
+    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+                    KMemoryState state, KMemoryPermission perm) {
         return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
                               this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
                               state, perm);
     }
-    ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
-    ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
-    ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
-                                          Svc::MemoryPermission svc_perm);
+    Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
+    Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
+    Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
-    ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
-    ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
-    ResultCode SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
-    ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
-    ResultCode SetMaxHeapSize(std::size_t size);
-    ResultCode SetHeapSize(VAddr* out, std::size_t size);
+    Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
+    Result ResetTransferMemory(VAddr addr, std::size_t size);
+    Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
+    Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
+    Result SetMaxHeapSize(std::size_t size);
+    Result SetHeapSize(VAddr* out, std::size_t size);
     ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                           bool is_map_only, VAddr region_start,
                                           std::size_t region_num_pages, KMemoryState state,
                                           KMemoryPermission perm, PAddr map_addr = 0);
-    ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
-    ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
-    ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
-    ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
-                                    KMemoryState state_mask, KMemoryState state,
-                                    KMemoryPermission perm_mask, KMemoryPermission perm,
-                                    KMemoryAttribute attr_mask, KMemoryAttribute attr);
+    Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
+    Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
+    Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
+    Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
+    Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+                                KMemoryState state_mask, KMemoryState state,
+                                KMemoryPermission perm_mask, KMemoryPermission perm,
+                                KMemoryAttribute attr_mask, KMemoryAttribute attr);
 
     Common::PageTable& PageTableImpl() {
         return page_table_impl;
@@ -102,82 +100,78 @@ private:
                                                            KMemoryAttribute::IpcLocked |
                                                            KMemoryAttribute::DeviceShared;
 
-    ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
-    ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
-                        KMemoryPermission perm);
-    ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
-                        PAddr phys_addr, bool is_pa_valid, VAddr region_start,
-                        std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
-    ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
+    Result InitializeMemoryLayout(VAddr start, VAddr end);
+    Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
+    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+                    bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
+                    KMemoryState state, KMemoryPermission perm);
+    Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
     bool IsRegionMapped(VAddr address, u64 size);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
-    void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
+    void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
     KMemoryInfo QueryInfoImpl(VAddr addr);
     VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
                                 std::size_t align);
-    ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
-                       OperationType operation);
-    ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
-                       OperationType operation, PAddr map_addr = 0);
+    Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+                   OperationType operation);
+    Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+                   OperationType operation, PAddr map_addr = 0);
     VAddr GetRegionAddress(KMemoryState state) const;
     std::size_t GetRegionSize(KMemoryState state) const;
 
     VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                        std::size_t alignment, std::size_t offset, std::size_t guard_pages);
 
-    ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
-                                          std::size_t size, KMemoryState state_mask,
-                                          KMemoryState state, KMemoryPermission perm_mask,
-                                          KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                          KMemoryAttribute attr) const;
-    ResultCode CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
-                                          KMemoryState state, KMemoryPermission perm_mask,
-                                          KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                          KMemoryAttribute attr) const {
+    Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+                                      KMemoryState state_mask, KMemoryState state,
+                                      KMemoryPermission perm_mask, KMemoryPermission perm,
+                                      KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+    Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
+                                      KMemoryState state, KMemoryPermission perm_mask,
+                                      KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                      KMemoryAttribute attr) const {
         return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
                                                 perm, attr_mask, attr);
     }
 
-    ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
-                                KMemoryState state, KMemoryPermission perm_mask,
-                                KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                KMemoryAttribute attr) const;
-    ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                                KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
-                                VAddr addr, std::size_t size, KMemoryState state_mask,
-                                KMemoryState state, KMemoryPermission perm_mask,
-                                KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                KMemoryAttribute attr,
-                                KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    ResultCode CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
-                                KMemoryState state_mask, KMemoryState state,
-                                KMemoryPermission perm_mask, KMemoryPermission perm,
-                                KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                                KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+    Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                            KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
+                            std::size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+    Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+                            KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
         return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
                                 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
     }
-    ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
-                                KMemoryState state, KMemoryPermission perm_mask,
-                                KMemoryPermission perm, KMemoryAttribute attr_mask,
-                                KMemoryAttribute attr,
-                                KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+    Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
+                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
        return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
                                      attr_mask, attr, ignore_attr);
    }
 
-    ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
-                                 KMemoryState state_mask, KMemoryState state,
-                                 KMemoryPermission perm_mask, KMemoryPermission perm,
-                                 KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                                 KMemoryPermission new_perm, KMemoryAttribute lock_attr);
-    ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
-                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
-                            KMemoryPermission new_perm, KMemoryAttribute lock_attr,
-                            const KPageLinkedList* pg);
+    Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+                             KMemoryState state_mask, KMemoryState state,
+                             KMemoryPermission perm_mask, KMemoryPermission perm,
+                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                             KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+    Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+                        KMemoryPermission perm_mask, KMemoryPermission perm,
+                        KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                        KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+                        const KPageGroup* pg);
 
-    ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
+    Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
+    bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
 
     bool IsLockedByCurrentThread() const {
        return general_lock.IsLockedByCurrentThread();
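The header keeps a single "full" `CheckMemoryState` overload and a set of thin inline wrappers that forward `nullptr` for whichever out-parameters the caller does not need. A reduced sketch of that forwarding pattern, with the kernel types collapsed to plain integers (these are not the real signatures):

```cpp
#include <cstddef>
#include <cstdint>

// One full checker does the work; thinner overloads forward nullptr for the
// out-parameters they do not need, mirroring k_page_table.h above.
struct Result {
    uint32_t raw;
    bool IsSuccess() const { return raw == 0; }
};
constexpr Result ResultSuccess{0};

class PageTableSketch {
public:
    // Full overload: every out-parameter is optional (may be nullptr).
    Result CheckMemoryState(uint32_t* out_state, std::size_t* out_blocks_needed,
                            uint64_t addr, std::size_t size) const {
        if (out_state) {
            *out_state = 1; // pretend we looked up the first block's state
        }
        if (out_blocks_needed) {
            *out_blocks_needed = size / 0x1000; // pretend block count
        }
        (void)addr;
        return ResultSuccess;
    }

    // Callers that only need the block count forward nullptr for the state.
    Result CheckMemoryState(std::size_t* out_blocks_needed, uint64_t addr,
                            std::size_t size) const {
        return this->CheckMemoryState(nullptr, out_blocks_needed, addr, size);
    }

    // Callers that need nothing but the pass/fail result.
    Result CheckMemoryState(uint64_t addr, std::size_t size) const {
        return this->CheckMemoryState(nullptr, addr, size);
    }
};
```

The real header applies the same idea to `CheckMemoryStateContiguous` and the `KMemoryInfo`-based overload as well, so every caller pays only for the outputs it asks for.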
src/core/hle/kernel/k_port.cpp
@@ -50,7 +50,7 @@ bool KPort::IsServerClosed() const {
     return state == State::ServerClosed;
 }
 
-ResultCode KPort::EnqueueSession(KServerSession* session) {
+Result KPort::EnqueueSession(KServerSession* session) {
     KScopedSchedulerLock sl{kernel};
 
     R_UNLESS(state == State::Normal, ResultPortClosed);
@@ -60,7 +60,7 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
     if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
         session_ptr->ClientConnected(server.AcceptSession());
     } else {
-        UNREACHABLE();
+        ASSERT(false);
     }
 
     return ResultSuccess;
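The repeated `UNREACHABLE()` to `ASSERT(false)` swaps in this and the surrounding files appear to pair with a semantics change elsewhere in this compare range: `UNREACHABLE` is treated as unconditionally fatal, while `ASSERT(false)` can log and let a fallback path run when asserts are disabled. The `return ResultSuccess; // Ignore error if asserts are off` line in the KServerSession hunks below makes that intent explicit. The definitions here are illustrative only; yuzu's real macros live in common/assert.h and route through its logging framework:

```cpp
#include <cstdio>
#include <cstdlib>

// Illustrative stand-ins, not yuzu's actual macros.
#define MY_ASSERT(expr)                                                        \
    do {                                                                       \
        if (!(expr)) {                                                         \
            std::fprintf(stderr, "Assertion failed: %s\n", #expr);             \
            /* In a release build this may only log and continue. */           \
        }                                                                      \
    } while (0)

#define MY_UNREACHABLE()                                                       \
    do {                                                                       \
        std::fprintf(stderr, "Hit unreachable code\n");                        \
        std::abort(); /* always fatal, even in release builds */               \
    } while (0)

int HandleCase(int value) {
    switch (value) {
    case 0:
        return 42;
    default:
        MY_ASSERT(false); // recoverable: log, then fall back to a default
        return 0;         // "ignore error if asserts are off"
    }
}

int main() {
    std::printf("%d\n", HandleCase(7)); // logs, then prints 0
}
```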
src/core/hle/kernel/k_port.h
@@ -34,7 +34,7 @@ public:
 
     bool IsServerClosed() const;
 
-    ResultCode EnqueueSession(KServerSession* session);
+    Result EnqueueSession(KServerSession* session);
 
     KClientPort& GetClientPort() {
         return client;
src/core/hle/kernel/k_process.cpp
@@ -57,23 +57,18 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
-    thread->DisableDispatch();
 
-    auto& kernel = system.Kernel();
-    // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
-    {
-        KScopedSchedulerLock lock{kernel};
-        thread->SetState(ThreadState::Runnable);
-
-        if (system.DebuggerEnabled()) {
-            thread->RequestSuspend(SuspendType::Debug);
-        }
+    if (system.DebuggerEnabled()) {
+        thread->RequestSuspend(SuspendType::Debug);
+    }
 
-        // Run our thread.
-        void(thread->Run());
-    }
+    // Run our thread.
+    void(thread->Run());
 }
 } // Anonymous namespace
 
-ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
-                                ProcessType type, KResourceLimit* res_limit) {
+Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
+                            ProcessType type, KResourceLimit* res_limit) {
     auto& kernel = system.Kernel();
 
     process->name = std::move(process_name);
@@ -181,7 +176,8 @@ void KProcess::PinCurrentThread(s32 core_id) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the current thread.
-    KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+    KThread* cur_thread =
+        kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
 
     // If the thread isn't terminated, pin it.
     if (!cur_thread->IsTerminationRequested()) {
@@ -198,7 +194,8 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the current thread.
-    KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+    KThread* cur_thread =
+        kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
 
     // Unpin it.
     cur_thread->Unpin();
@@ -222,8 +219,8 @@ void KProcess::UnpinThread(KThread* thread) {
     KScheduler::SetSchedulerUpdateNeeded(kernel);
 }
 
-ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
-                                     [[maybe_unused]] size_t size) {
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+                                 [[maybe_unused]] size_t size) {
     // Lock ourselves, to prevent concurrent access.
     KScopedLightLock lk(state_lock);
 
@@ -275,15 +272,19 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
     shmem->Close();
 }
 
-void KProcess::RegisterThread(const KThread* thread) {
+void KProcess::RegisterThread(KThread* thread) {
+    KScopedLightLock lk{list_lock};
+
     thread_list.push_back(thread);
 }
 
-void KProcess::UnregisterThread(const KThread* thread) {
+void KProcess::UnregisterThread(KThread* thread) {
+    KScopedLightLock lk{list_lock};
+
     thread_list.remove(thread);
 }
 
-ResultCode KProcess::Reset() {
+Result KProcess::Reset() {
     // Lock the process and the scheduler.
     KScopedLightLock lk(state_lock);
     KScopedSchedulerLock sl{kernel};
@@ -297,8 +298,51 @@ ResultCode KProcess::Reset() {
     return ResultSuccess;
 }
 
-ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
-                                      std::size_t code_size) {
+Result KProcess::SetActivity(ProcessActivity activity) {
+    // Lock ourselves and the scheduler.
+    KScopedLightLock lk{state_lock};
+    KScopedLightLock list_lk{list_lock};
+    KScopedSchedulerLock sl{kernel};
+
+    // Validate our state.
+    R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
+    R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+
+    // Either pause or resume.
+    if (activity == ProcessActivity::Paused) {
+        // Verify that we're not suspended.
+        if (is_suspended) {
+            return ResultInvalidState;
+        }
+
+        // Suspend all threads.
+        for (auto* thread : GetThreadList()) {
+            thread->RequestSuspend(SuspendType::Process);
+        }
+
+        // Set ourselves as suspended.
+        SetSuspended(true);
+    } else {
+        ASSERT(activity == ProcessActivity::Runnable);
+
+        // Verify that we're suspended.
+        if (!is_suspended) {
+            return ResultInvalidState;
+        }
+
+        // Resume all threads.
+        for (auto* thread : GetThreadList()) {
+            thread->Resume(SuspendType::Process);
+        }
+
+        // Set ourselves as resumed.
+        SetSuspended(false);
+    }
+
+    return ResultSuccess;
+}
+
+Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
     program_id = metadata.GetTitleID();
     ideal_core = metadata.GetMainThreadCore();
     is_64bit_process = metadata.Is64BitProgram();
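The new `KProcess::SetActivity` above suspends or resumes every thread the process owns and flips `is_suspended`, rejecting redundant transitions; presumably it backs the `svcSetProcessActivity` supervisor call. The pause/resume state machine in isolation, with the kernel types stripped away (a model, not the real implementation):

```cpp
#include <cstdio>
#include <vector>

// Minimal model of the SetActivity logic; thread suspension is just a bool.
enum class ProcessActivity { Runnable, Paused };

struct ThreadModel {
    bool suspended = false;
};

struct ProcessModel {
    std::vector<ThreadModel> threads;
    bool is_suspended = false;

    // Returns false on an invalid transition (mirrors ResultInvalidState).
    bool SetActivity(ProcessActivity activity) {
        if (activity == ProcessActivity::Paused) {
            if (is_suspended) {
                return false; // already paused
            }
            for (auto& t : threads) {
                t.suspended = true; // RequestSuspend(SuspendType::Process)
            }
            is_suspended = true;
        } else {
            if (!is_suspended) {
                return false; // already runnable
            }
            for (auto& t : threads) {
                t.suspended = false; // Resume(SuspendType::Process)
            }
            is_suspended = false;
        }
        return true;
    }
};

int main() {
    ProcessModel p{{{}, {}}};
    std::printf("pause: %d\n", p.SetActivity(ProcessActivity::Paused));       // 1
    std::printf("pause again: %d\n", p.SetActivity(ProcessActivity::Paused)); // 0
    std::printf("resume: %d\n", p.SetActivity(ProcessActivity::Runnable));    // 1
}
```

Taking `state_lock`, `list_lock`, and the scheduler lock together in the real function is what makes the whole-process transition atomic with respect to thread creation and scheduling.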
@@ -313,24 +357,24 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
         return ResultLimitReached;
     }
     // Initialize process address space
-    if (const ResultCode result{
-            page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
-                                             code_size, KMemoryManager::Pool::Application)};
+    if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
+                                                             0x8000000, code_size,
+                                                             KMemoryManager::Pool::Application)};
         result.IsError()) {
         return result;
     }
 
     // Map process code region
-    if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
-                                                           code_size / PageSize, KMemoryState::Code,
-                                                           KMemoryPermission::None)};
+    if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
+                                                       code_size / PageSize, KMemoryState::Code,
+                                                       KMemoryPermission::None)};
         result.IsError()) {
         return result;
     }
 
     // Initialize process capabilities
     const auto& caps{metadata.GetKernelCapabilities()};
-    if (const ResultCode result{
+    if (const Result result{
             capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
         result.IsError()) {
         return result;
@@ -350,7 +394,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
         break;
 
     default:
-        UNREACHABLE();
+        ASSERT(false);
     }
 
     // Create TLS region
@@ -377,11 +421,11 @@ void KProcess::PrepareForTermination() {
     ChangeStatus(ProcessStatus::Exiting);
 
     const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
-        for (auto& thread : in_thread_list) {
+        for (auto* thread : in_thread_list) {
             if (thread->GetOwnerProcess() != this)
                 continue;
 
-            if (thread == kernel.CurrentScheduler()->GetCurrentThread())
+            if (thread == GetCurrentThreadPointer(kernel))
                 continue;
 
             // TODO(Subv): When are the other running/ready threads terminated?
@@ -437,7 +481,7 @@ void KProcess::Finalize() {
     KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
 }
 
-ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
+Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     KThreadLocalPage* tlp = nullptr;
     VAddr tlr = 0;
 
@@ -488,7 +532,7 @@ ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) {
     return ResultSuccess;
 }
 
-ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
+Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
     KThreadLocalPage* page_to_free = nullptr;
 
     // Release the region.
@@ -536,6 +580,52 @@ ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
     return ResultSuccess;
 }
 
+bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
+                                DebugWatchpointType type) {
+    const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+        return wp.type == DebugWatchpointType::None;
+    })};
+
+    if (watch == watchpoints.end()) {
+        return false;
+    }
+
+    watch->start_address = addr;
+    watch->end_address = addr + size;
+    watch->type = type;
+
+    for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+        debug_page_refcounts[page]++;
+        system.Memory().MarkRegionDebug(page, PageSize, true);
+    }
+
+    return true;
+}
+
+bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
+                                DebugWatchpointType type) {
+    const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+        return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
+    })};
+
+    if (watch == watchpoints.end()) {
+        return false;
+    }
+
+    watch->start_address = 0;
+    watch->end_address = 0;
+    watch->type = DebugWatchpointType::None;
+
+    for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+        debug_page_refcounts[page]--;
+        if (!debug_page_refcounts[page]) {
+            system.Memory().MarkRegionDebug(page, PageSize, false);
+        }
+    }
+
+    return true;
+}
+
 void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
     const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
                                       Svc::MemoryPermission permission) {
@@ -556,9 +646,10 @@ bool KProcess::IsSignaled() const {
 }
 
 KProcess::KProcess(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_},
-      page_table{std::make_unique<KPageTable>(kernel_.System())}, handle_table{kernel_},
-      address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, state_lock{kernel_} {}
+    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
+                                                        kernel_.System())},
+      handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
+      state_lock{kernel_}, list_lock{kernel_} {}
 
 KProcess::~KProcess() = default;
 
@@ -572,7 +663,7 @@ void KProcess::ChangeStatus(ProcessStatus new_status) {
     NotifyAvailable();
 }
 
-ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
+Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
     ASSERT(stack_size);
 
     // The kernel always ensures that the given stack size is page aligned.
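`InsertWatchpoint`/`RemoveWatchpoint` keep a per-page reference count (`debug_page_refcounts`) so overlapping watchpoints on the same page do not unmark it prematurely: a page stays flagged for debug access until the last watchpoint touching it is gone. The counting trick in isolation, where `kPageSize` and the printfs stand in for the real `MarkRegionDebug` calls (a sketch, not the kernel code):

```cpp
#include <cstdint>
#include <cstdio>
#include <map>

constexpr uint64_t kPageSize = 0x1000;

uint64_t AlignDown(uint64_t value, uint64_t align) {
    return value & ~(align - 1);
}

struct DebugPageTracker {
    std::map<uint64_t, uint64_t> refcounts;

    void AddRange(uint64_t addr, uint64_t size) {
        for (uint64_t page = AlignDown(addr, kPageSize); page < addr + size;
             page += kPageSize) {
            // operator[] default-constructs a zero count for new pages.
            if (refcounts[page]++ == 0) {
                std::printf("mark   %#llx as debug\n", (unsigned long long)page);
            }
        }
    }

    void RemoveRange(uint64_t addr, uint64_t size) {
        for (uint64_t page = AlignDown(addr, kPageSize); page < addr + size;
             page += kPageSize) {
            // Only unmark once the last overlapping watchpoint is removed.
            if (--refcounts[page] == 0) {
                std::printf("unmark %#llx\n", (unsigned long long)page);
            }
        }
    }
};

int main() {
    DebugPageTracker t;
    t.AddRange(0x1000, 0x20);    // watchpoint A
    t.AddRange(0x1010, 0x10);    // watchpoint B on the same page
    t.RemoveRange(0x1000, 0x20); // page stays marked: B still references it
    t.RemoveRange(0x1010, 0x10); // now it is unmarked
}
```

The slight difference from the diff: the real insert path calls `MarkRegionDebug` on every iteration rather than only on the zero-to-one transition, which is harmless because marking is idempotent.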
src/core/hle/kernel/k_process.h
@@ -7,6 +7,7 @@
 #include <array>
 #include <cstddef>
 #include <list>
+#include <map>
 #include <string>
 #include "common/common_types.h"
 #include "core/hle/kernel/k_address_arbiter.h"
@@ -63,6 +64,25 @@ enum class ProcessStatus {
     DebugBreak,
 };
 
+enum class ProcessActivity : u32 {
+    Runnable,
+    Paused,
+};
+
+enum class DebugWatchpointType : u8 {
+    None = 0,
+    Read = 1 << 0,
+    Write = 1 << 1,
+    ReadOrWrite = Read | Write,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType);
+
+struct DebugWatchpoint {
+    VAddr start_address;
+    VAddr end_address;
+    DebugWatchpointType type;
+};
+
 class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask> {
     KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
 
@@ -90,8 +110,8 @@ public:
 
     static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
 
-    static ResultCode Initialize(KProcess* process, Core::System& system, std::string process_name,
-                                 ProcessType type, KResourceLimit* res_limit);
+    static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
+                             ProcessType type, KResourceLimit* res_limit);
 
     /// Gets a reference to the process' page table.
     KPageTable& PageTable() {
@@ -113,11 +133,11 @@ public:
         return handle_table;
     }
 
-    ResultCode SignalToAddress(VAddr address) {
+    Result SignalToAddress(VAddr address) {
         return condition_var.SignalToAddress(address);
     }
 
-    ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
+    Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
         return condition_var.WaitForAddress(handle, address, tag);
     }
 
@@ -125,17 +145,16 @@ public:
         return condition_var.Signal(cv_key, count);
     }
 
-    ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+    Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
         return condition_var.Wait(address, cv_key, tag, ns);
     }
 
-    ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
-                                    s32 count) {
+    Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
         return address_arbiter.SignalToAddress(address, signal_type, value, count);
     }
 
-    ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
-                                  s64 timeout) {
+    Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+                              s64 timeout) {
         return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
     }
 
@@ -282,17 +301,17 @@ public:
     u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
 
     /// Gets the list of all threads created with this process as their owner.
-    const std::list<const KThread*>& GetThreadList() const {
+    std::list<KThread*>& GetThreadList() {
         return thread_list;
     }
 
     /// Registers a thread as being created under this process,
     /// adding it to this process' thread list.
-    void RegisterThread(const KThread* thread);
+    void RegisterThread(KThread* thread);
 
     /// Unregisters a thread from this process, removing it
     /// from this process' thread list.
-    void UnregisterThread(const KThread* thread);
+    void UnregisterThread(KThread* thread);
 
     /// Clears the signaled state of the process if and only if it's signaled.
     ///
@@ -302,7 +321,7 @@ public:
     /// @pre The process must be in a signaled state. If this is called on a
     ///      process instance that is not signaled, ERR_INVALID_STATE will be
     ///      returned.
-    ResultCode Reset();
+    Result Reset();
 
     /**
     * Loads process-specific configuration info with metadata provided
@@ -313,7 +332,7 @@ public:
     * @returns ResultSuccess if all relevant metadata was able to be
    *          loaded and parsed. Otherwise, an error code is returned.
    */
-    ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
+    Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
 
    /**
    * Starts the main application thread for this process.
@@ -347,6 +366,8 @@ public:
 
    void DoWorkerTaskImpl();
 
+    Result SetActivity(ProcessActivity activity);
+
    void PinCurrentThread(s32 core_id);
    void UnpinCurrentThread(s32 core_id);
    void UnpinThread(KThread* thread);
@@ -355,17 +376,30 @@ public:
        return state_lock;
    }
 
-    ResultCode AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
+    Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
    void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
 
    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Thread-local storage management
 
    // Marks the next available region as used and returns the address of the slot.
-    [[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out);
+    [[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);
 
    // Frees a used TLS slot identified by the given address
-    ResultCode DeleteThreadLocalRegion(VAddr addr);
+    Result DeleteThreadLocalRegion(VAddr addr);
+
+    ///////////////////////////////////////////////////////////////////////////////////////////////
+    // Debug watchpoint management
+
+    // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
+    bool InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+
+    // Attempts to remove the watchpoint specified by the given parameters.
+    bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+
+    const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
+        return watchpoints;
+    }
 
 private:
    void PinThread(s32 core_id, KThread* thread) {
@@ -388,7 +422,7 @@ private:
    void ChangeStatus(ProcessStatus new_status);
 
    /// Allocates the main thread stack for the process, given the stack size in bytes.
-    ResultCode AllocateMainThreadStack(std::size_t stack_size);
+    Result AllocateMainThreadStack(std::size_t stack_size);
 
    /// Memory manager for this process
    std::unique_ptr<KPageTable> page_table;
@@ -442,7 +476,7 @@ private:
    std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
 
    /// List of threads that are running with this process as their owner.
-    std::list<const KThread*> thread_list;
+    std::list<KThread*> thread_list;
 
    /// List of shared memory that are running with this process as their owner.
    std::list<KSharedMemoryInfo*> shared_memory_list;
@@ -471,10 +505,13 @@ private:
    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
    std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
+    std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{};
+    std::map<VAddr, u64> debug_page_refcounts;
 
    KThread* exception_thread{};
 
    KLightLock state_lock;
+    KLightLock list_lock;
 
    using TLPTree =
        Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
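`DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType)` gives the scoped enum its bitwise operators. Note that `ReadOrWrite = Read | Write` inside the enum body is legal even without those operators: before the closing brace, enumerators of a scoped enum with a fixed underlying type still have the underlying type, so plain integer arithmetic applies. Roughly what such a macro provides, hand-written here and reduced to the two operators this header actually needs (yuzu's own macro lives in its common headers):

```cpp
#include <cstdint>
#include <type_traits>

enum class WatchpointFlags : uint8_t {
    None = 0,
    Read = 1 << 0,
    Write = 1 << 1,
    ReadOrWrite = Read | Write, // underlying-type arithmetic, see note above
};

constexpr WatchpointFlags operator|(WatchpointFlags a, WatchpointFlags b) {
    using U = std::underlying_type_t<WatchpointFlags>;
    return static_cast<WatchpointFlags>(static_cast<U>(a) | static_cast<U>(b));
}

constexpr WatchpointFlags operator&(WatchpointFlags a, WatchpointFlags b) {
    using U = std::underlying_type_t<WatchpointFlags>;
    return static_cast<WatchpointFlags>(static_cast<U>(a) & static_cast<U>(b));
}

// Usage: test whether a watchpoint traps reads.
constexpr bool TrapsReads(WatchpointFlags type) {
    return (type & WatchpointFlags::Read) != WatchpointFlags::None;
}

static_assert(TrapsReads(WatchpointFlags::ReadOrWrite));
static_assert(!TrapsReads(WatchpointFlags::Write));
```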
src/core/hle/kernel/k_readable_event.cpp
@@ -27,7 +27,7 @@ void KReadableEvent::Destroy() {
     }
 }
 
-ResultCode KReadableEvent::Signal() {
+Result KReadableEvent::Signal() {
     KScopedSchedulerLock lk{kernel};
 
     if (!is_signaled) {
@@ -38,13 +38,13 @@ ResultCode KReadableEvent::Signal() {
     return ResultSuccess;
 }
 
-ResultCode KReadableEvent::Clear() {
+Result KReadableEvent::Clear() {
     Reset();
 
     return ResultSuccess;
 }
 
-ResultCode KReadableEvent::Reset() {
+Result KReadableEvent::Reset() {
     KScopedSchedulerLock lk{kernel};
 
     if (!is_signaled) {
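The `Signal`/`Clear`/`Reset` trio differs mainly in how failure is reported: `Clear()` discards `Reset()`'s status, while `Reset()` itself reports an error when the event is not signaled, which is what the `if (!is_signaled)` guards in the hunk suggest. A reduced model of that state machine (the error value is a placeholder, and the real class also wakes waiting threads under the scheduler lock):

```cpp
#include <cstdio>

struct Result {
    unsigned raw;
    bool IsSuccess() const { return raw == 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalidState{1}; // placeholder numeric value

class ReadableEventModel {
public:
    Result Signal() {
        if (!is_signaled) {
            is_signaled = true;
            // The real KReadableEvent also notifies waiters here.
        }
        return ResultSuccess;
    }

    // Clear() succeeds whether or not the event was signaled...
    Result Clear() {
        Reset();
        return ResultSuccess;
    }

    // ...while Reset() reports an error when there is nothing to reset.
    Result Reset() {
        if (!is_signaled) {
            return ResultInvalidState;
        }
        is_signaled = false;
        return ResultSuccess;
    }

private:
    bool is_signaled{};
};

int main() {
    ReadableEventModel ev;
    std::printf("reset unsignaled: %d\n", ev.Reset().IsSuccess()); // 0
    ev.Signal();
    std::printf("reset signaled:   %d\n", ev.Reset().IsSuccess()); // 1
}
```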
src/core/hle/kernel/k_readable_event.h
@@ -33,9 +33,9 @@ public:
     bool IsSignaled() const override;
     void Destroy() override;
 
-    ResultCode Signal();
-    ResultCode Clear();
-    ResultCode Reset();
+    Result Signal();
+    Result Clear();
+    Result Reset();
 
 private:
     bool is_signaled{};
src/core/hle/kernel/k_resource_limit.cpp
@@ -73,7 +73,7 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
     return value;
 }
 
-ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
+Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
     const auto index = static_cast<std::size_t>(which);
     KScopedLightLock lk(lock);
     R_UNLESS(current_values[index] <= value, ResultInvalidState);
src/core/hle/kernel/k_resource_limit.h
@@ -8,7 +8,7 @@
 #include "core/hle/kernel/k_light_condition_variable.h"
 #include "core/hle/kernel/k_light_lock.h"
 
-union ResultCode;
+union Result;
 
 namespace Core::Timing {
 class CoreTiming;
@@ -46,7 +46,7 @@ public:
     s64 GetPeakValue(LimitableResource which) const;
     s64 GetFreeValue(LimitableResource which) const;
 
-    ResultCode SetLimitValue(LimitableResource which, s64 value);
+    Result SetLimitValue(LimitableResource which, s64 value);
 
     bool Reserve(LimitableResource which, s64 value);
     bool Reserve(LimitableResource which, s64 value, s64 timeout);
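The forward declaration changing from `union ResultCode;` to `union Result;` confirms the result type really is a union: a raw 32-bit value overlaid with module/description bit fields, following the usual HOS error-code layout (module in the low 9 bits, description above it). An approximation with plain shifts; yuzu's real type in core/hle/result.h is built on its BitField helper, so treat this as an assumption-labeled sketch:

```cpp
#include <cstdint>
#include <cstdio>

union ResultSketch {
    uint32_t raw;

    constexpr explicit ResultSketch(uint32_t value) : raw{value} {}
    constexpr ResultSketch(uint32_t module, uint32_t description)
        : raw{(module & 0x1FF) | ((description & 0x1FFF) << 9)} {}

    constexpr uint32_t Module() const { return raw & 0x1FF; }
    constexpr uint32_t Description() const { return (raw >> 9) & 0x1FFF; }
    constexpr bool IsSuccess() const { return raw == 0; }
};

int main() {
    constexpr ResultSketch err{/*module=*/1, /*description=*/7};
    std::printf("module=%u description=%u success=%d\n", err.Module(),
                err.Description(), err.IsSuccess());
}
```

Forward-declaring it as `union` (rather than `struct`) matters because a forward declaration must match the class-key of the eventual definition.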
@@ -317,7 +317,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
|
||||
|
||||
{
|
||||
KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id);
|
||||
if (best_thread == GetCurrentThread()) {
|
||||
if (best_thread == GetCurrentThreadPointer(kernel)) {
|
||||
best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread);
|
||||
}
|
||||
|
||||
@@ -424,7 +424,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
|
||||
ASSERT(kernel.CurrentProcess() != nullptr);
|
||||
|
||||
// Get the current thread and process.
|
||||
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
|
||||
KThread& cur_thread = GetCurrentThread(kernel);
|
||||
KProcess& cur_process = *kernel.CurrentProcess();
|
||||
|
||||
// If the thread's yield count matches, there's nothing for us to do.
|
||||
@@ -463,7 +463,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
|
||||
ASSERT(kernel.CurrentProcess() != nullptr);
|
||||
|
||||
// Get the current thread and process.
|
||||
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
|
||||
KThread& cur_thread = GetCurrentThread(kernel);
|
||||
KProcess& cur_process = *kernel.CurrentProcess();
|
||||
|
||||
// If the thread's yield count matches, there's nothing for us to do.
|
||||
@@ -551,7 +551,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
|
||||
ASSERT(kernel.CurrentProcess() != nullptr);
|
||||
|
||||
// Get the current thread and process.
|
||||
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
|
||||
KThread& cur_thread = GetCurrentThread(kernel);
|
||||
KProcess& cur_process = *kernel.CurrentProcess();
|
||||
|
||||
// If the thread's yield count matches, there's nothing for us to do.
|
||||
@@ -642,7 +642,7 @@ KScheduler::~KScheduler() {
|
||||
ASSERT(!idle_thread);
|
||||
}
|
||||
|
||||
KThread* KScheduler::GetCurrentThread() const {
|
||||
KThread* KScheduler::GetSchedulerCurrentThread() const {
|
||||
if (auto result = current_thread.load(); result) {
|
||||
return result;
|
||||
}
|
||||
@@ -654,7 +654,7 @@ u64 KScheduler::GetLastContextSwitchTicks() const {
|
||||
}
|
||||
|
||||
void KScheduler::RescheduleCurrentCore() {
|
||||
ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
|
||||
ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
|
||||
|
||||
auto& phys_core = system.Kernel().PhysicalCore(core_id);
|
||||
if (phys_core.IsInterrupted()) {
|
||||
@@ -665,7 +665,7 @@ void KScheduler::RescheduleCurrentCore() {
|
||||
if (state.needs_scheduling.load()) {
|
||||
Schedule();
|
||||
} else {
|
||||
GetCurrentThread()->EnableDispatch();
|
||||
GetCurrentThread(system.Kernel()).EnableDispatch();
|
||||
guard.Unlock();
|
||||
}
|
||||
}
|
||||
@@ -710,6 +710,7 @@ void KScheduler::Reload(KThread* thread) {
|
||||
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
|
||||
cpu_core.LoadContext(thread->GetContext32());
|
||||
cpu_core.LoadContext(thread->GetContext64());
|
||||
cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
|
||||
cpu_core.SetTlsAddress(thread->GetTLSAddress());
|
||||
cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
|
||||
cpu_core.ClearExclusiveState();
|
||||
@@ -717,13 +718,18 @@ void KScheduler::Reload(KThread* thread) {
|
||||
|
||||
void KScheduler::SwitchContextStep2() {
|
||||
// Load context of new thread
|
||||
Reload(GetCurrentThread());
|
||||
Reload(GetCurrentThreadPointer(system.Kernel()));
|
||||
|
||||
RescheduleCurrentCore();
|
||||
}
|
||||
|
||||
void KScheduler::Schedule() {
|
||||
ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
|
||||
this->ScheduleImpl();
|
||||
}
|
||||
|
||||
void KScheduler::ScheduleImpl() {
|
||||
KThread* previous_thread = GetCurrentThread();
|
||||
KThread* previous_thread = GetCurrentThreadPointer(system.Kernel());
|
||||
KThread* next_thread = state.highest_priority_thread;
|
||||
|
||||
state.needs_scheduling.store(false);
|
||||
@@ -761,6 +767,7 @@ void KScheduler::ScheduleImpl() {
|
||||
old_context = &previous_thread->GetHostContext();
|
||||
|
||||
// Set the new thread.
|
||||
SetCurrentThread(system.Kernel(), next_thread);
|
||||
current_thread.store(next_thread);
|
||||
|
||||
guard.Unlock();
|
||||
@@ -804,6 +811,7 @@ void KScheduler::SwitchToCurrent() {
|
||||
}
|
||||
}
|
||||
auto thread = next_thread ? next_thread : idle_thread;
|
||||
SetCurrentThread(system.Kernel(), thread);
|
||||
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
|
||||
} while (!is_switch_pending());
|
||||
}
|
||||
@@ -829,6 +837,7 @@ void KScheduler::Initialize() {
     idle_thread = KThread::Create(system.Kernel());
     ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
     idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
+    idle_thread->EnableDispatch();
 }
 
 KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
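The new idle_thread->EnableDispatch() call lines the freshly created idle thread up with the ASSERT(...GetDisableDispatchCount() == 1) checks elsewhere in this diff, which suggests threads are created with dispatch disabled more than once. A toy model of such a counter; the creation-time value of 2 is an assumption for illustration only:

```cpp
#include <cassert>

// Toy dispatch-disable counter, as the asserts in this diff suggest it works:
// DisableDispatch increments, EnableDispatch decrements, and scheduling code
// expects the count to sit at a known baseline.
class Thread {
public:
    void DisableDispatch() { ++disable_count; }
    void EnableDispatch() {
        assert(disable_count > 0);
        --disable_count;
    }
    int GetDisableDispatchCount() const { return disable_count; }

private:
    int disable_count = 2; // assumed creation-time value; one EnableDispatch() -> 1
};

int main() {
    Thread idle;
    idle.EnableDispatch(); // mirrors the added idle_thread->EnableDispatch()
    assert(idle.GetDisableDispatchCount() == 1);
}
```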
src/core/hle/kernel/k_scheduler.h
@@ -48,7 +48,7 @@ public:
     void Reload(KThread* thread);
 
     /// Gets the current running thread
-    [[nodiscard]] KThread* GetCurrentThread() const;
+    [[nodiscard]] KThread* GetSchedulerCurrentThread() const;
 
     /// Gets the idle thread
     [[nodiscard]] KThread* GetIdleThread() const {
@@ -57,7 +57,7 @@ public:
 
     /// Returns true if the scheduler is idle
     [[nodiscard]] bool IsIdle() const {
-        return GetCurrentThread() == idle_thread;
+        return GetSchedulerCurrentThread() == idle_thread;
     }
 
     /// Gets the timestamp for the last context switch in ticks.
@@ -149,10 +149,7 @@ private:
 
     void RotateScheduledQueue(s32 cpu_core_id, s32 priority);
 
-    void Schedule() {
-        ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
-        this->ScheduleImpl();
-    }
+    void Schedule();
 
     /// Switches the CPU's active thread context to that of the specified thread
     void ScheduleImpl();
src/core/hle/kernel/k_server_session.cpp
@@ -79,7 +79,7 @@ std::size_t KServerSession::NumDomainRequestHandlers() const {
     return manager->DomainHandlerCount();
 }
 
-ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
+Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
     if (!context.HasDomainMessageHeader()) {
         return ResultSuccess;
     }
@@ -97,13 +97,13 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
                          "object_id {} is too big! This probably means a recent service call "
                          "to {} needed to return a new interface!",
                          object_id, name);
-            UNREACHABLE();
+            ASSERT(false);
+            return ResultSuccess; // Ignore error if asserts are off
         }
         if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
            return strong_ptr->HandleSyncRequest(*this, context);
         } else {
-            UNREACHABLE();
+            ASSERT(false);
+            return ResultSuccess;
         }
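Replacing UNREACHABLE() with ASSERT(false) plus an explicit return changes release-build behavior: an unreachable-style macro typically terminates unconditionally, while an assert compiles out when asserts are disabled, so the function now needs a defined fallback path (hence the "Ignore error if asserts are off" comment). The same contrast shown with standard facilities; yuzu's own ASSERT/UNREACHABLE macros are project-specific:

```cpp
#include <cassert>

int HandleObject(int object_id, int handler_count) {
    if (object_id > handler_count) {
        // UNREACHABLE-style: std::abort() here would terminate even in
        // release builds, and no return statement would be needed.

        // ASSERT(false)-style: fires only when asserts are compiled in
        // (NDEBUG unset), so release builds need a defined fallback return.
        assert(false && "object_id is too big");
        return 0; // "Ignore error if asserts are off"
    }
    return object_id;
}

int main() {
    return HandleObject(1, 4) == 1 ? 0 : 1;
}
```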
@@ -123,7 +123,7 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
     return ResultSuccess;
 }
 
-ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
+Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
     u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
     auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
@@ -143,8 +143,8 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
     return ResultSuccess;
 }
 
-ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
-    ResultCode result = ResultSuccess;
+Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
+    Result result = ResultSuccess;
 
     // If the session has been converted to a domain, handle the domain request
     if (manager->HasSessionRequestHandler(context)) {
@@ -173,8 +173,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
     return result;
 }
 
-ResultCode KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
-                                             Core::Timing::CoreTiming& core_timing) {
+Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+                                         Core::Timing::CoreTiming& core_timing) {
     return QueueSyncRequest(thread, memory);
 }
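From here on, most hunks are a mechanical type rename, ResultCode to Result, with values such as ResultSuccess left untouched. One common way to stage a rename like this without migrating every signature at once is a transitional alias; a hypothetical sketch, not yuzu's actual core/hle/result.h:

```cpp
// Hypothetical staging of a type rename: the new name is the real type and
// the old name survives as an alias until all signatures are migrated.
class Result {
public:
    constexpr explicit Result(unsigned raw_) : raw{raw_} {}
    constexpr bool IsSuccess() const { return raw == 0; }

private:
    unsigned raw;
};

using ResultCode = Result; // transitional alias for not-yet-migrated code

constexpr Result ResultSuccess{0};

Result DoRequest(bool ok) {
    return ok ? ResultSuccess : Result{1};
}

int main() {
    return DoRequest(true).IsSuccess() ? 0 : 1;
}
```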
src/core/hle/kernel/k_server_session.h
@@ -73,10 +73,10 @@ public:
      * @param memory Memory context to handle the sync request under.
      * @param core_timing Core timing context to schedule the request event under.
      *
-     * @returns ResultCode from the operation.
+     * @returns Result from the operation.
      */
-    ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
-                                 Core::Timing::CoreTiming& core_timing);
+    Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+                             Core::Timing::CoreTiming& core_timing);
 
     /// Adds a new domain request handler to the collection of request handlers within
     /// this ServerSession instance.
@@ -103,14 +103,14 @@ public:
 
 private:
     /// Queues a sync request from the emulated application.
-    ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
+    Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
 
     /// Completes a sync request from the emulated application.
-    ResultCode CompleteSyncRequest(HLERequestContext& context);
+    Result CompleteSyncRequest(HLERequestContext& context);
 
     /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
     /// object handle.
-    ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
+    Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);
 
     /// This session's HLE request handlers
     std::shared_ptr<SessionRequestManager> manager;
src/core/hle/kernel/k_shared_memory.cpp
@@ -18,12 +18,10 @@ KSharedMemory::~KSharedMemory() {
     kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
 }
 
-ResultCode KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
-                                     KPageLinkedList&& page_list_,
-                                     Svc::MemoryPermission owner_permission_,
-                                     Svc::MemoryPermission user_permission_,
-                                     PAddr physical_address_, std::size_t size_,
-                                     std::string name_) {
+Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+                                 KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+                                 Svc::MemoryPermission user_permission_, PAddr physical_address_,
+                                 std::size_t size_, std::string name_) {
     // Set members.
     owner_process = owner_process_;
     device_memory = &device_memory_;
@@ -67,8 +65,8 @@ void KSharedMemory::Finalize() {
     KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
 }
 
-ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
-                              Svc::MemoryPermission permissions) {
+Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
+                          Svc::MemoryPermission permissions) {
     const u64 page_count{(map_size + PageSize - 1) / PageSize};
 
     if (page_list.GetNumPages() != page_count) {
@@ -86,7 +84,7 @@ ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size
                              ConvertToKMemoryPermission(permissions));
 }
 
-ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
+Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
     const u64 page_count{(unmap_size + PageSize - 1) / PageSize};
 
     if (page_list.GetNumPages() != page_count) {
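The page_count expression in both Map and Unmap is the usual ceiling-division idiom: any partial trailing page still occupies a whole page. For example, with 4 KiB pages a 0x1001-byte mapping needs two pages. A self-contained check (the 4 KiB PageSize matches the usual Switch page size):

```cpp
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t PageSize = 0x1000; // 4096 bytes

// Ceiling division: rounds a byte size up to whole pages.
constexpr std::uint64_t PageCount(std::uint64_t size) {
    return (size + PageSize - 1) / PageSize;
}

static_assert(PageCount(0x1000) == 1); // exactly one page
static_assert(PageCount(0x1001) == 2); // one byte over -> rounds up
static_assert(PageCount(0x3FFF) == 4);

int main() {
    std::printf("%llu\n", static_cast<unsigned long long>(PageCount(0x2800))); // 3
}
```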
src/core/hle/kernel/k_shared_memory.h
@@ -9,7 +9,7 @@
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/slab_helpers.h"
 #include "core/hle/result.h"
@@ -26,10 +26,10 @@ public:
     explicit KSharedMemory(KernelCore& kernel_);
     ~KSharedMemory() override;
 
-    ResultCode Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
-                          KPageLinkedList&& page_list_, Svc::MemoryPermission owner_permission_,
-                          Svc::MemoryPermission user_permission_, PAddr physical_address_,
-                          std::size_t size_, std::string name_);
+    Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+                      KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+                      Svc::MemoryPermission user_permission_, PAddr physical_address_,
+                      std::size_t size_, std::string name_);
 
     /**
      * Maps a shared memory block to an address in the target process' address space
@@ -38,8 +38,8 @@ public:
      * @param map_size Size of the shared memory block to map
      * @param permissions Memory block map permissions (specified by SVC field)
      */
-    ResultCode Map(KProcess& target_process, VAddr address, std::size_t map_size,
-                   Svc::MemoryPermission permissions);
+    Result Map(KProcess& target_process, VAddr address, std::size_t map_size,
+               Svc::MemoryPermission permissions);
 
     /**
      * Unmaps a shared memory block from an address in the target process' address space
@@ -47,7 +47,7 @@ public:
      * @param address Address in system memory to unmap shared memory block
      * @param unmap_size Size of the shared memory block to unmap
      */
-    ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
+    Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
 
     /**
      * Gets a pointer to the shared memory block
@@ -77,7 +77,7 @@ public:
 private:
     Core::DeviceMemory* device_memory;
     KProcess* owner_process{};
-    KPageLinkedList page_list;
+    KPageGroup page_list;
     Svc::MemoryPermission owner_permission{};
     Svc::MemoryPermission user_permission{};
     PAddr physical_address{};
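The KPageLinkedList to KPageGroup change reads as a rename of the container tracking runs of contiguous physical pages, presumably to match the official kernel's naming. A hypothetical sketch of what such a container tracks; the interface shown is illustrative, not yuzu's actual KPageGroup:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

using PAddr = std::uint64_t;

// Illustrative page-group container: a list of (base address, page count)
// blocks describing runs of contiguous physical pages.
class PageGroup {
public:
    void AddBlock(PAddr addr, std::size_t num_pages) {
        blocks.push_back({addr, num_pages});
    }
    std::size_t GetNumPages() const {
        std::size_t total = 0;
        for (const auto& b : blocks) {
            total += b.num_pages;
        }
        return total;
    }

private:
    struct Block {
        PAddr addr;
        std::size_t num_pages;
    };
    std::vector<Block> blocks;
};

int main() {
    PageGroup group;
    group.AddBlock(0x80000000, 3);
    group.AddBlock(0x80010000, 1);
    return group.GetNumPages() == 4 ? 0 : 1; // matches the Map/Unmap size check
}
```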
Some files were not shown because too many files have changed in this diff.