Compare commits

104 Commits

Author SHA1 Message Date
yuzubot
fe79901b2b Android #117 2023-10-30 00:57:51 +00:00
yuzubot
a5f1d625e2 Merge PR 11910 2023-10-30 00:57:51 +00:00
liamwhite
adb0900906 Merge pull request #11911 from german77/leak_event
core: Close all KEvents
2023-10-29 19:46:47 -04:00
liamwhite
2d608cd625 Merge pull request #11909 from t895/card-grid
android: Break home settings into grid with large screens
2023-10-29 19:46:41 -04:00
liamwhite
29955de767 Merge pull request #11904 from ameerj/gl_threaded_opts_on
nvidia_flags: Enable GL Threaded optimizations
2023-10-29 19:46:34 -04:00
liamwhite
b0f62d8f24 Merge pull request #11898 from hebo6/patch-1
Adding StartupWmClass for .desktop file
2023-10-29 19:46:27 -04:00
liamwhite
ed2d77ddbc Merge pull request #11893 from liamwhite/swizzle
renderer_vulkan: fix viewport swizzle dirty state tracking
2023-10-29 19:46:20 -04:00
german77
6e883a26da core: Close all KEvents 2023-10-29 13:52:12 -06:00
Charles Lombardo
a5aa5876b4 android: Break home settings into grid with large screens 2023-10-29 13:47:41 -04:00
liamwhite
911d2216be Merge pull request #11866 from liamwhite/more-qt-nonsense
qt: fix game list shutdown crash
2023-10-29 11:25:22 -04:00
liamwhite
4da2105a32 Merge pull request #11862 from liamwhite/pascal-robust
Manually robust on Pascal and earlier
2023-10-29 11:25:15 -04:00
liamwhite
1f9684eaf9 Merge pull request #11859 from Kelebek1/compute_findbuffer
Add missing loop around compute FindBuffer calls
2023-10-29 11:25:09 -04:00
liamwhite
40c97c0549 Merge pull request #11852 from german77/async_brr
input_common: joycon: Move vibrations to a queue
2023-10-29 11:25:02 -04:00
liamwhite
6aee148b17 Merge pull request #11843 from liamwhite/sync-process
kernel: update KProcess
2023-10-29 11:24:52 -04:00
liamwhite
b5b93e6741 Merge pull request #11827 from liamwhite/preallocated
nvnflinger: fix reporting and freeing of preallocated buffers
2023-10-29 11:24:44 -04:00
Narr the Reg
18a4529851 Merge pull request #11803 from flodavid/improve-controller-applet-click
yuzu: Improve behavior when clicking on controller box in Controller applet
2023-10-29 09:13:07 -06:00
Ameer J
9e4d606c4c nvidia_flags: Enable GL Threaded optimizations 2023-10-28 21:26:22 -04:00
Bo He
64f60f0acb Adding StartupWmClass for .desktop file 2023-10-28 13:45:35 +08:00
Liam
21c631b33b renderer_vulkan: fix viewport swizzle dirty state tracking 2023-10-27 14:23:47 -04:00
liamwhite
43be2bfe33 Merge pull request #11880 from abouvier/unbundle-stb
cmake: prefer system stb headers
2023-10-25 17:21:37 -04:00
Alexandre Bouvier
79ba5d9c26 cmake: prefer system stb headers 2023-10-25 21:47:32 +02:00
liamwhite
008d7e8c5f Merge pull request #11876 from liamwhite/apiversion
vulkan_common: use highest API version
2023-10-25 12:22:21 -04:00
Liam
19e9bde9e0 kernel: make sure new process is in list 2023-10-25 10:05:45 -04:00
liamwhite
6eb3a583cb Merge pull request #11812 from german77/save_capture
service: caps: Implement SaveScreenShotEx0 and variants
2023-10-24 21:43:51 -04:00
Liam
e0834ee50b vulkan_common: use highest API version 2023-10-24 17:04:17 -04:00
Liam
79894152a8 qt: fix game list shutdown crash 2023-10-23 23:06:07 -04:00
liamwhite
9274eaecd0 Merge pull request #11863 from german77/buffer
service: ipc: Add third read buffer index
2023-10-23 17:04:09 -04:00
Narr the Reg
c733620024 service: ipc: Add third read buffer index 2023-10-23 10:33:01 -06:00
german77
897b411ae7 service: caps: Implement SaveScreenShotEx0 and variants 2023-10-23 10:18:22 -06:00
german77
94836ba3b1 externals: stb: Add image write 2023-10-23 10:18:14 -06:00
liamwhite
b1909b0435 Merge pull request #11841 from german77/halp
yuzu: fix restore shortcuts button
2023-10-23 10:36:40 -04:00
liamwhite
1cc764988f Merge pull request #11846 from german77/cheats
cheats: Clamp cheat names without failing
2023-10-23 10:33:37 -04:00
liamwhite
da5c49f22d Merge pull request #11847 from ameerj/glsl-shfl-fix
emit_glsl_warp: Fix shfl_in_bounds conditional
2023-10-23 10:33:24 -04:00
liamwhite
6b93b0b08c Merge pull request #11854 from german77/vibration-ui
yuzu: Fix vibration reseting to 1%
2023-10-23 10:33:03 -04:00
Kelebek1
68f25217b8 Add missing dowhile loops around FindBuffer calls 2023-10-23 15:08:56 +01:00
Liam
0604b14263 Manually robust on Pascal and earlier 2023-10-23 09:08:57 -04:00
german77
3d4a064674 yuzu: Fix vibration reseting to 1% 2023-10-22 13:39:45 -06:00
german77
e4dfd51337 input_common: joycon: Move vibrations to a queue 2023-10-22 11:30:59 -06:00
Ameer J
cfe73af6f2 emit_glsl_warp: Fix shfl_in_bounds conditional 2023-10-22 00:45:23 -04:00
Narr the Reg
77fb9d415b yuzu: Fix restore shortcuts button 2023-10-21 21:16:20 -06:00
german77
bbdaa62175 cheats: Clamp cheat names without failing 2023-10-21 21:04:03 -06:00
Liam
31bffc7299 kernel: fix extraneous ref 2023-10-21 22:16:41 -04:00
Liam
5f8f09d750 kernel: shutdown app before gpu 2023-10-21 20:35:18 -04:00
Liam
dcfe674ed4 kernel: signal thread on termination completed 2023-10-21 20:03:41 -04:00
Liam
bb195c2c2b kernel: add missing TLR clear 2023-10-21 20:03:41 -04:00
Liam
8c59543ee3 kernel: update KProcess 2023-10-21 20:03:41 -04:00
liamwhite
db37e583ff Merge pull request #11831 from liamwhite/hosversionbetween
set: return version info from system archive
2023-10-21 18:22:20 -04:00
liamwhite
d28e826e47 Merge pull request #11830 from liamwhite/ts-session
ts: add OpenSession
2023-10-21 18:22:13 -04:00
liamwhite
13beb85514 Merge pull request #11828 from liamwhite/setthreadescription
common: use SetThreadDescription API for thread names
2023-10-21 18:22:04 -04:00
liamwhite
4b06bcc82c Merge pull request #11789 from Kelebek1/spirv_shift_right
Manually robust on Maxwell and earlier
2023-10-21 18:21:53 -04:00
Liam
12ebc8d9d1 set: return version info from system archive 2023-10-20 13:29:52 -04:00
Liam
2b85e9e997 ts: add OpenSession 2023-10-20 13:29:32 -04:00
Liam
59b62c6507 common: use SetThreadDescription API for thread names 2023-10-20 11:41:29 -04:00
Fernando S
2e760a9833 Merge pull request #11748 from liamwhite/kern_1700
kernel: update for 17.0.0
2023-10-20 17:08:00 +02:00
Fernando S
bab4a13a41 Merge pull request #11825 from liamwhite/system-resource
kernel: fix incorrect calculation of used non system memory value
2023-10-20 16:40:15 +02:00
liamwhite
b56c7397ad Merge pull request #11806 from liamwhite/needs-more-locking
renderer_vulkan: add locks to avoid scheduler flushes from CPU
2023-10-20 10:26:03 -04:00
Liam
689f346e97 nvnflinger: fix reporting and freeing of preallocated buffers
Co-authored-by: Kelebek1 <eeeedddccc@hotmail.co.uk>
2023-10-20 10:17:32 -04:00
Liam
249db0a59b kernel: fix incorrect calculation of used non system memory value 2023-10-20 09:12:10 -04:00
Liam
9526ce95dd gdbstub: add PermissionLocked to mappings table 2023-10-20 02:53:31 -04:00
Liam
687158fe00 kernel: fix format string error 2023-10-20 02:41:32 -04:00
Liam
d8507332c1 kernel: make check fully constexpr for broken msvc constant folding 2023-10-20 02:34:15 -04:00
Liam
67e983a354 codespell: allow 'VAs' 2023-10-20 02:34:15 -04:00
Liam
f21058a6c0 k_page_table: add MapFirstGroup 2023-10-20 02:34:15 -04:00
Liam
b456af31e6 kernel: update KMemoryRegionType values 2023-10-20 02:34:15 -04:00
Liam
0441853d0f k_page_table: implement PermissionLocked 2023-10-20 02:34:15 -04:00
Liam
60a1c6b95b k_page_table: add new CheckMemoryState helper 2023-10-20 02:34:15 -04:00
Liam
794e6c7a96 kernel: split Io memory state, add PermissionLocked attribute 2023-10-20 02:34:15 -04:00
Liam
22afa2c7a3 kernel: reshuffle ini1 size, add slab clear note 2023-10-20 02:34:15 -04:00
liamwhite
85a89ca3e3 Merge pull request #11822 from german77/no-name
service: mii: Create random mii with name
2023-10-19 16:54:05 -04:00
Narr the Reg
26776c0e60 service: mii: Create random mii with name 2023-10-19 13:35:02 -06:00
Kelebek1
e02ee8e59d Manually robust on Maxwell and earlier 2023-10-19 19:54:31 +01:00
liamwhite
134ecca9b0 Merge pull request #11810 from liamwhite/clang-17
general: fix build failure on clang 17
2023-10-18 19:30:29 -04:00
liamwhite
c5f1ec8040 Merge pull request #11795 from Squall-Leonhart/D32FToOther
[Vulkan]Implement missing copy formats for D32, ARGB8_SRGB and BGRA8_Unorm/SRGB
2023-10-18 09:22:14 -04:00
liamwhite
765ea9b79d Merge pull request #11791 from german77/bufferx
service: hle: Allow to access read buffer A and X directly
2023-10-18 09:21:58 -04:00
Liam
c5bdc0054c general: fix build failure on clang 17 2023-10-17 22:44:21 -04:00
flodavid
0b7593d352 yuzu: Improve behavior when clicking on controller box in Controller applet
- Apply changes on Controller configuration of commit 9524d70 to Controller applet
  - Fix regression of this previous commit:
  Enabling a controller in its tab did not activate previous controllers

Signed-off-by: flodavid <fl.david.53@gmail.com>
2023-10-17 23:19:11 +02:00
liamwhite
bd05ace08d Merge pull request #11774 from liamwhite/refcount-issue
fsmitm_romfsbuild: avoid unnecessary copies of vfs pointers
2023-10-17 11:49:11 -04:00
liamwhite
fa56518f20 Merge pull request #11747 from Kelebek1/image_alias_sample_names
Small things
2023-10-17 11:48:57 -04:00
liamwhite
b577d7a55f Merge pull request #11349 from vonchenplus/buffer_cache_crash
video_core: Fix moltenvk crash on macos
2023-10-17 11:48:44 -04:00
Liam
d9dde7e6f3 renderer_vulkan: add locks to avoid scheduler flushes from CPU 2023-10-17 10:00:25 -04:00
Fernando S
2244b613cf Merge pull request #11788 from Squall-Leonhart/IFREMOVED
[crash fix]brings back the removed if  statement in util.cpp and adds the  num_level test to it like previous discontinued PR
2023-10-17 14:36:36 +02:00
german77
c73bb33ff1 service: hle: Allow to access read buffer A and X directly 2023-10-16 23:36:46 -06:00
Narr the Reg
bcce184e60 service: acc: Implement functions needed for profile select (#11653) 2023-10-17 05:12:55 +02:00
Squall-Leonhart
326ebbb2fa Changes based on hardware tests
Removes unnecessary d32f to bgra shader and blit functions,
update vk_texture_cache to use abgr shader for d32f to BGRA formats
updates  abgr to d32f shader to comply with hardware tests
2023-10-17 02:42:40 +11:00
Squall Leonhart
07143ce15c Make Clang happy. 2023-10-17 00:26:19 +11:00
Squall Leonhart
dbc73c6c6c Added missing BuildShader line
Adds `convert_abgr8_to_d32f_frag(BuildShader(device, CONVERT_ABGR8_TO_D32F_FRAG_SPV)),`
2023-10-17 00:15:31 +11:00
liamwhite
8becf13e8b Merge pull request #11786 from v1993/cuda-on-linux
host1x/codecs: enable CUDA on Linux
2023-10-15 22:23:00 -04:00
liamwhite
9e2ebb24df Merge pull request #11794 from german77/linemot
input_common: udp: Avoid crash when trying to map motion before client is ready
2023-10-15 22:22:45 -04:00
Squall Leonhart
90c56f5dc1 added missing trailing line. 2023-10-16 06:07:26 +11:00
Squall Leonhart
4b0291172e meant to add the unorms as well 2023-10-16 04:29:24 +11:00
Squall-Leonhart
12e4757cf3 use texelfetch instead of texturelod 2023-10-16 04:20:45 +11:00
Squall Leonhart
144c0734f5 appease the format gods 2023-10-16 03:24:44 +11:00
Squall-Leonhart
f40f65f5d2 Another missing copy connected to Bravely Default II
adds blit_image_helper.ConvertABGR8ToD32F and fragment shader for performing ABGR and BGRA to D32F copies
2023-10-16 03:17:53 +11:00
Squall-Leonhart
03c3f936cf missed this line when editing the copypasta 2023-10-15 20:58:50 +11:00
Squall-Leonhart
66f41da365 moved line to appease the format gods 2023-10-15 20:54:25 +11:00
Squall-Leonhart
7a986d731b Implement missing formats for Bravely Default 2 2023-10-15 20:43:48 +11:00
german77
eae0570a1c input_common: udp: Avoid crash when trying to map motion before client is ready 2023-10-15 02:13:51 -06:00
Squall Leonhart
b57d98f847 brings back the removed If statement and adds the num_level test
This resolves the out of bounds read/writes in the linear swizzler, it brings back the scaled TOTK Recall bug however, pending further work in the block size calculation.

Recall is not glitched in the Dynamic FPS resolution mod to the degree that it is in the native yuzu scaler, this can be a workaround for the time being.


The recall effect is constructed from multiple 320x180 texture slices, it breaking may have a similar origin to https://github.com/Ryujinx/Ryujinx/pull/5640

but it may also be connected to the other deficiencies identified in the Yuzu size calculations, such as no apparent implementation of slice testing for end of slce depth as opposed to full aligned size as implemented in https://github.com/Ryujinx/Ryujinx/pull/5220
2023-10-15 02:09:28 +11:00
Valeri
762ac5aa9f host1x/codecs: enable CUDA on Linux 2023-10-14 17:35:45 +03:00
Liam
053a16799e fsmitm_romfsbuild: avoid unnecessary copies of vfs pointers 2023-10-13 14:22:52 -04:00
Kelebek1
98cac9410c Get out of render pass before query barriers, fix image names with samples > 1, remove image alias bit 2023-10-11 17:15:35 +01:00
Feng Chen
e69eebb14a video_core: Fix d24r8/s8d24 convert shader build error in moltenvk 2023-09-07 18:01:36 +08:00
Feng Chen
0145c89879 video_core: Add missing scissor update when viewport scale offset disable 2023-09-07 18:01:30 +08:00
Feng Chen
cc4736fa58 video_core: set vertex buffer num to 16, because mvk have when using more than 16 2023-08-23 23:22:55 +08:00
155 changed files with 14775 additions and 11669 deletions

View File

@@ -3,4 +3,4 @@
[codespell]
skip = ./.git,./build,./dist,./Doxyfile,./externals,./LICENSES,./src/android/app/src/main/res
ignore-words-list = aci,allright,ba,canonicalizations,deques,froms,hda,inout,lod,masia,nam,nax,nd,optin,pullrequests,pullrequest,te,transfered,unstall,uscaled,zink
ignore-words-list = aci,allright,ba,canonicalizations,deques,froms,hda,inout,lod,masia,nam,nax,nd,optin,pullrequests,pullrequest,te,transfered,unstall,uscaled,vas,zink

View File

@@ -147,3 +147,7 @@ License: GPL-3.0-or-later
Files: src/android/gradle/wrapper/*
Copyright: 2023 yuzu Emulator Project
License: GPL-3.0-or-later
Files: externals/stb/*
Copyright: Sean Barrett
License: MIT

View File

@@ -294,6 +294,7 @@ find_package(lz4 REQUIRED)
find_package(nlohmann_json 3.8 REQUIRED)
find_package(Opus 1.3 MODULE)
find_package(RenderDoc MODULE)
find_package(stb MODULE)
find_package(VulkanMemoryAllocator CONFIG)
find_package(ZLIB 1.2 REQUIRED)
find_package(zstd 1.5 REQUIRED)

View File

@@ -0,0 +1,31 @@
# SPDX-FileCopyrightText: 2023 Alexandre Bouvier <contact@amb.tf>
#
# SPDX-License-Identifier: GPL-3.0-or-later
find_path(stb_image_INCLUDE_DIR stb_image.h PATH_SUFFIXES stb)
find_path(stb_image_resize_INCLUDE_DIR stb_image_resize.h PATH_SUFFIXES stb)
find_path(stb_image_write_INCLUDE_DIR stb_image_write.h PATH_SUFFIXES stb)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(stb
REQUIRED_VARS
stb_image_INCLUDE_DIR
stb_image_resize_INCLUDE_DIR
stb_image_write_INCLUDE_DIR
)
if (stb_FOUND AND NOT TARGET stb::headers)
add_library(stb::headers INTERFACE IMPORTED)
set_property(TARGET stb::headers PROPERTY
INTERFACE_INCLUDE_DIRECTORIES
"${stb_image_INCLUDE_DIR}"
"${stb_image_resize_INCLUDE_DIR}"
"${stb_image_write_INCLUDE_DIR}"
)
endif()
mark_as_advanced(
stb_image_INCLUDE_DIR
stb_image_resize_INCLUDE_DIR
stb_image_write_INCLUDE_DIR
)

View File

@@ -1,5 +1,6 @@
| Pull Request | Commit | Title | Author | Merged? |
|----|----|----|----|----|
| [11910](https://github.com/yuzu-emu/yuzu//pull/11910) | [`8427b9d49`](https://github.com/yuzu-emu/yuzu//pull/11910/files) | renderer_vulkan: ensure exception on surface loss | [liamwhite](https://github.com/liamwhite/) | Yes |
End of merge log. You can find the original README.md below the break.

View File

@@ -13,3 +13,4 @@ Exec=yuzu %f
Categories=Game;Emulator;Qt;
MimeType=application/x-nx-nro;application/x-nx-nso;application/x-nx-nsp;application/x-nx-xci;
Keywords=Nintendo;Switch;
StartupWMClass=yuzu

View File

@@ -168,9 +168,13 @@ if (NOT TARGET LLVM::Demangle)
add_library(LLVM::Demangle ALIAS demangle)
endif()
add_library(stb stb/stb_dxt.cpp stb/stb_image.cpp stb/stb_image_resize.cpp)
add_library(stb stb/stb_dxt.cpp)
target_include_directories(stb PUBLIC ./stb)
if (NOT TARGET stb::headers)
add_library(stb::headers ALIAS stb)
endif()
add_library(bc_decoder bc_decoder/bc_decoder.cpp)
target_include_directories(bc_decoder PUBLIC ./bc_decoder)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

externals/stb/stb_image_write.h vendored Normal file (1724 lines)

File diff suppressed because it is too large

View File

@@ -26,7 +26,7 @@ import androidx.fragment.app.Fragment
import androidx.fragment.app.activityViewModels
import androidx.navigation.findNavController
import androidx.navigation.fragment.findNavController
import androidx.recyclerview.widget.LinearLayoutManager
import androidx.recyclerview.widget.GridLayoutManager
import com.google.android.material.transition.MaterialSharedAxis
import org.yuzu.yuzu_emu.BuildConfig
import org.yuzu.yuzu_emu.HomeNavigationDirections
@@ -186,7 +186,8 @@ class HomeSettingsFragment : Fragment() {
}
binding.homeSettingsList.apply {
layoutManager = LinearLayoutManager(requireContext())
layoutManager =
GridLayoutManager(requireContext(), resources.getInteger(R.integer.grid_columns))
adapter = HomeSettingAdapter(
requireActivity() as AppCompatActivity,
viewLifecycleOwner,

View File

@@ -16,7 +16,8 @@
<LinearLayout
android:id="@+id/option_layout"
android:layout_width="match_parent"
android:layout_height="wrap_content">
android:layout_height="wrap_content"
android:layout_gravity="center_vertical">
<ImageView
android:id="@+id/option_icon"

View File

@@ -120,6 +120,8 @@ add_library(common STATIC
socket_types.h
spin_lock.cpp
spin_lock.h
stb.cpp
stb.h
steady_clock.cpp
steady_clock.h
stream.cpp
@@ -208,6 +210,8 @@ if (MSVC)
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4800 # Implicit conversion from 'type' to bool. Possible information loss
)
else()
set_source_files_properties(stb.cpp PROPERTIES COMPILE_OPTIONS "-Wno-implicit-fallthrough;-Wno-missing-declarations;-Wno-missing-field-initializers")
endif()
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
@@ -223,7 +227,7 @@ endif()
create_target_directory_groups(common)
target_link_libraries(common PUBLIC Boost::context Boost::headers fmt::fmt microprofile Threads::Threads)
target_link_libraries(common PUBLIC Boost::context Boost::headers fmt::fmt microprofile stb::headers Threads::Threads)
target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd LLVM::Demangle)
if (ANDROID)

View File

@@ -25,6 +25,7 @@ void ConfigureNvidiaEnvironmentFlags() {
void(_putenv(fmt::format("__GL_SHADER_DISK_CACHE_PATH={}", windows_path_string).c_str()));
void(_putenv("__GL_SHADER_DISK_CACHE_SKIP_CLEANUP=1"));
void(_putenv("__GL_THREADED_OPTIMIZATIONS=1"));
#endif
}

src/common/stb.cpp Normal file (8 lines)
View File

@@ -0,0 +1,8 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_RESIZE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "common/stb.h"

src/common/stb.h Normal file (8 lines)
View File

@@ -0,0 +1,8 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <stb_image.h>
#include <stb_image_resize.h>
#include <stb_image_write.h>
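
The stb libraries are single-header: function bodies are only compiled in the one translation unit that defines the STB_*_IMPLEMENTATION macros before the include, which is why src/common/stb.cpp above defines them exactly once and every other consumer includes only the thin common/stb.h wrapper. A minimal sketch of a hypothetical consumer (the function name, file path, and error handling below are illustrative assumptions, not code from this changeset):

// Hypothetical consumer: decode a PNG from memory, then re-encode it.
// Only common/stb.h is included; the implementation is compiled once in stb.cpp.
#include <cstdio>
#include <vector>
#include "common/stb.h"

bool RoundTripImage(const std::vector<unsigned char>& png_bytes, const char* out_path) {
    int width{}, height{}, channels{};
    // Force a 4-channel RGBA decode regardless of the source format.
    unsigned char* pixels = stbi_load_from_memory(
        png_bytes.data(), static_cast<int>(png_bytes.size()), &width, &height, &channels, 4);
    if (pixels == nullptr) {
        std::fprintf(stderr, "decode failed: %s\n", stbi_failure_reason());
        return false;
    }
    // Stride is width * 4 bytes for tightly packed RGBA rows.
    const int ok = stbi_write_png(out_path, width, height, 4, pixels, width * 4);
    stbi_image_free(pixels);
    return ok != 0;
}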

View File

@@ -11,6 +11,7 @@
#include <mach/mach.h>
#elif defined(_WIN32)
#include <windows.h>
#include "common/string_util.h"
#else
#if defined(__Bitrig__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
#include <pthread_np.h>
@@ -82,29 +83,8 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
#ifdef _MSC_VER
// Sets the debugger-visible name of the current thread.
// Uses trick documented in:
// https://docs.microsoft.com/en-us/visualstudio/debugger/how-to-set-a-thread-name-in-native-code
void SetCurrentThreadName(const char* name) {
static const DWORD MS_VC_EXCEPTION = 0x406D1388;
#pragma pack(push, 8)
struct THREADNAME_INFO {
DWORD dwType; // must be 0x1000
LPCSTR szName; // pointer to name (in user addr space)
DWORD dwThreadID; // thread ID (-1=caller thread)
DWORD dwFlags; // reserved for future use, must be zero
} info;
#pragma pack(pop)
info.dwType = 0x1000;
info.szName = name;
info.dwThreadID = std::numeric_limits<DWORD>::max();
info.dwFlags = 0;
__try {
RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
} __except (EXCEPTION_CONTINUE_EXECUTION) {
}
SetThreadDescription(GetCurrentThread(), UTF8ToUTF16W(name).data());
}
#else // !MSVC_VER, so must be POSIX threads
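
The removed block used the old MSVC trick of raising exception 0x406D1388, which only names a thread if a debugger happens to be attached at that instant; SetThreadDescription (available since Windows 10 1607) stores the name with the thread itself, so debuggers, traces, and crash dumps can recover it later. A minimal standalone sketch of the new API (hypothetical wrapper name; the real call site is in the diff above):

// Sketch: name the current thread via SetThreadDescription (Windows 10 1607+).
#include <windows.h>

void NameCurrentThread(const wchar_t* name) {
    // Returns an HRESULT; failure only loses the name, so it is often ignored.
    SetThreadDescription(GetCurrentThread(), name);
}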

View File

@@ -86,9 +86,9 @@ void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<Backt
std::map<std::string, Symbols::Symbols> symbols;
for (const auto& module : modules) {
symbols.insert_or_assign(
module.second, Symbols::GetSymbols(module.first, system.ApplicationMemory(),
system.ApplicationProcess()->Is64BitProcess()));
symbols.insert_or_assign(module.second,
Symbols::GetSymbols(module.first, system.ApplicationMemory(),
system.ApplicationProcess()->Is64Bit()));
}
for (auto& entry : out) {

View File

@@ -116,11 +116,8 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
}
}
if (concat.empty()) {
return nullptr;
}
return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(concat, dir->GetName());
return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(dir->GetName(),
std::move(concat));
}
if (Common::FS::IsDir(path)) {
@@ -312,17 +309,10 @@ struct System::Impl {
telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);
// Create a resource limit for the process.
const auto physical_memory_size =
kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
auto* resource_limit = Kernel::CreateResourceLimitForProcess(system, physical_memory_size);
// Create the process.
auto main_process = Kernel::KProcess::Create(system.Kernel());
ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
Kernel::KProcess::ProcessType::Userland, resource_limit)
.IsSuccess());
Kernel::KProcess::Register(system.Kernel(), main_process);
kernel.AppendNewProcess(main_process);
kernel.MakeApplicationProcess(main_process);
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
@@ -421,6 +411,7 @@ struct System::Impl {
services->KillNVNFlinger();
}
kernel.CloseServices();
kernel.ShutdownCores();
services.reset();
service_manager.reset();
cheat_engine.reset();
@@ -432,7 +423,6 @@ struct System::Impl {
gpu_core.reset();
host1x_core.reset();
perf_stats.reset();
kernel.ShutdownCores();
cpu_manager.Shutdown();
debugger.reset();
kernel.Shutdown();

View File

@@ -258,20 +258,20 @@ private:
Kernel::KScopedSchedulerLock sl{system.Kernel()};
// Put all threads to sleep on next scheduler round.
for (auto* thread : ThreadList()) {
thread->RequestSuspend(Kernel::SuspendType::Debug);
for (auto& thread : ThreadList()) {
thread.RequestSuspend(Kernel::SuspendType::Debug);
}
}
void ResumeEmulation(Kernel::KThread* except = nullptr) {
// Wake up all threads.
for (auto* thread : ThreadList()) {
if (thread == except) {
for (auto& thread : ThreadList()) {
if (std::addressof(thread) == except) {
continue;
}
thread->SetStepState(Kernel::StepState::NotStepping);
thread->Resume(Kernel::SuspendType::Debug);
thread.SetStepState(Kernel::StepState::NotStepping);
thread.Resume(Kernel::SuspendType::Debug);
}
}
@@ -283,13 +283,17 @@ private:
}
void UpdateActiveThread() {
const auto& threads{ThreadList()};
if (std::find(threads.begin(), threads.end(), state->active_thread) == threads.end()) {
state->active_thread = threads.front();
auto& threads{ThreadList()};
for (auto& thread : threads) {
if (std::addressof(thread) == state->active_thread) {
// Thread is still alive, no need to update.
return;
}
}
state->active_thread = std::addressof(threads.front());
}
const std::list<Kernel::KThread*>& ThreadList() {
Kernel::KProcess::ThreadList& ThreadList() {
return system.ApplicationProcess()->GetThreadList();
}

View File

@@ -109,7 +109,7 @@ static std::string EscapeXML(std::string_view data) {
GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_)
: DebuggerFrontend(backend_), system{system_} {
if (system.ApplicationProcess()->Is64BitProcess()) {
if (system.ApplicationProcess()->Is64Bit()) {
arch = std::make_unique<GDBStubA64>();
} else {
arch = std::make_unique<GDBStubA32>();
@@ -446,10 +446,10 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
// See osdbg_thread_local_region.os.horizon.hpp and osdbg_thread_type.os.horizon.hpp
static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
const Kernel::KThread* thread) {
const Kernel::KThread& thread) {
// Read thread type from TLS
const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)};
const VAddr argument_thread_type{thread->GetArgument()};
const VAddr tls_thread_type{memory.Read32(thread.GetTlsAddress() + 0x1fc)};
const VAddr argument_thread_type{thread.GetArgument()};
if (argument_thread_type && tls_thread_type != argument_thread_type) {
// Probably not created by nnsdk, no name available.
@@ -477,10 +477,10 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
}
static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
const Kernel::KThread* thread) {
const Kernel::KThread& thread) {
// Read thread type from TLS
const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)};
const VAddr argument_thread_type{thread->GetArgument()};
const VAddr tls_thread_type{memory.Read64(thread.GetTlsAddress() + 0x1f8)};
const VAddr argument_thread_type{thread.GetArgument()};
if (argument_thread_type && tls_thread_type != argument_thread_type) {
// Probably not created by nnsdk, no name available.
@@ -508,16 +508,16 @@ static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory&
}
static std::optional<std::string> GetThreadName(Core::System& system,
const Kernel::KThread* thread) {
if (system.ApplicationProcess()->Is64BitProcess()) {
const Kernel::KThread& thread) {
if (system.ApplicationProcess()->Is64Bit()) {
return GetNameFromThreadType64(system.ApplicationMemory(), thread);
} else {
return GetNameFromThreadType32(system.ApplicationMemory(), thread);
}
}
static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
switch (thread->GetWaitReasonForDebugging()) {
static std::string_view GetThreadWaitReason(const Kernel::KThread& thread) {
switch (thread.GetWaitReasonForDebugging()) {
case Kernel::ThreadWaitReasonForDebugging::Sleep:
return "Sleep";
case Kernel::ThreadWaitReasonForDebugging::IPC:
@@ -535,8 +535,8 @@ static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
}
}
static std::string GetThreadState(const Kernel::KThread* thread) {
switch (thread->GetState()) {
static std::string GetThreadState(const Kernel::KThread& thread) {
switch (thread.GetState()) {
case Kernel::ThreadState::Initialized:
return "Initialized";
case Kernel::ThreadState::Waiting:
@@ -604,7 +604,7 @@ void GDBStub::HandleQuery(std::string_view command) {
const auto& threads = system.ApplicationProcess()->GetThreadList();
std::vector<std::string> thread_ids;
for (const auto& thread : threads) {
thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId()));
thread_ids.push_back(fmt::format("{:x}", thread.GetThreadId()));
}
SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
} else if (command.starts_with("sThreadInfo")) {
@@ -616,14 +616,14 @@ void GDBStub::HandleQuery(std::string_view command) {
buffer += "<threads>";
const auto& threads = system.ApplicationProcess()->GetThreadList();
for (const auto* thread : threads) {
for (const auto& thread : threads) {
auto thread_name{GetThreadName(system, thread)};
if (!thread_name) {
thread_name = fmt::format("Thread {:d}", thread->GetThreadId());
thread_name = fmt::format("Thread {:d}", thread.GetThreadId());
}
buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
thread->GetThreadId(), thread->GetActiveCore(),
thread.GetThreadId(), thread.GetActiveCore(),
EscapeXML(*thread_name), GetThreadState(thread));
}
@@ -822,11 +822,13 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
const char p =
True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
reply +=
fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{} [{}, {}]\n",
mem_info.base_address, mem_info.base_address + mem_info.size - 1,
perm, state, l, i, d, u, mem_info.ipc_count, mem_info.device_count);
reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
mem_info.base_address,
mem_info.base_address + mem_info.size - 1, perm, state, l, i,
d, u, p, mem_info.ipc_count, mem_info.device_count);
}
const uintptr_t next_address = mem_info.base_address + mem_info.size;
@@ -848,10 +850,10 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
}
Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
const auto& threads{system.ApplicationProcess()->GetThreadList()};
for (auto* thread : threads) {
if (thread->GetThreadId() == thread_id) {
return thread;
auto& threads{system.ApplicationProcess()->GetThreadList()};
for (auto& thread : threads) {
if (thread.GetThreadId() == thread_id) {
return std::addressof(thread);
}
}

View File

@@ -107,62 +107,56 @@ static u64 romfs_get_hash_table_count(u64 num_entries) {
void RomFSBuildContext::VisitDirectory(VirtualDir romfs_dir, VirtualDir ext_dir,
std::shared_ptr<RomFSBuildDirectoryContext> parent) {
std::vector<std::shared_ptr<RomFSBuildDirectoryContext>> child_dirs;
for (auto& child_romfs_file : romfs_dir->GetFiles()) {
const auto name = child_romfs_file->GetName();
const auto child = std::make_shared<RomFSBuildFileContext>();
// Set child's path.
child->cur_path_ofs = parent->path_len + 1;
child->path_len = child->cur_path_ofs + static_cast<u32>(name.size());
child->path = parent->path + "/" + name;
const auto entries = romfs_dir->GetEntries();
if (ext_dir != nullptr && ext_dir->GetFile(name + ".stub") != nullptr) {
continue;
}
for (const auto& kv : entries) {
if (kv.second == VfsEntryType::Directory) {
const auto child = std::make_shared<RomFSBuildDirectoryContext>();
// Set child's path.
child->cur_path_ofs = parent->path_len + 1;
child->path_len = child->cur_path_ofs + static_cast<u32>(kv.first.size());
child->path = parent->path + "/" + kv.first;
// Sanity check on path_len
ASSERT(child->path_len < FS_MAX_PATH);
if (ext_dir != nullptr && ext_dir->GetFile(kv.first + ".stub") != nullptr) {
continue;
}
child->source = std::move(child_romfs_file);
// Sanity check on path_len
ASSERT(child->path_len < FS_MAX_PATH);
if (AddDirectory(parent, child)) {
child_dirs.push_back(child);
}
} else {
const auto child = std::make_shared<RomFSBuildFileContext>();
// Set child's path.
child->cur_path_ofs = parent->path_len + 1;
child->path_len = child->cur_path_ofs + static_cast<u32>(kv.first.size());
child->path = parent->path + "/" + kv.first;
if (ext_dir != nullptr && ext_dir->GetFile(kv.first + ".stub") != nullptr) {
continue;
}
// Sanity check on path_len
ASSERT(child->path_len < FS_MAX_PATH);
child->source = romfs_dir->GetFile(kv.first);
if (ext_dir != nullptr) {
if (const auto ips = ext_dir->GetFile(kv.first + ".ips")) {
if (auto patched = PatchIPS(child->source, ips)) {
child->source = std::move(patched);
}
if (ext_dir != nullptr) {
if (const auto ips = ext_dir->GetFile(name + ".ips")) {
if (auto patched = PatchIPS(child->source, ips)) {
child->source = std::move(patched);
}
}
child->size = child->source->GetSize();
AddFile(parent, child);
}
child->size = child->source->GetSize();
AddFile(parent, child);
}
for (auto& child : child_dirs) {
auto subdir_name = std::string_view(child->path).substr(child->cur_path_ofs);
auto child_romfs_dir = romfs_dir->GetSubdirectory(subdir_name);
auto child_ext_dir = ext_dir != nullptr ? ext_dir->GetSubdirectory(subdir_name) : nullptr;
for (auto& child_romfs_dir : romfs_dir->GetSubdirectories()) {
const auto name = child_romfs_dir->GetName();
const auto child = std::make_shared<RomFSBuildDirectoryContext>();
// Set child's path.
child->cur_path_ofs = parent->path_len + 1;
child->path_len = child->cur_path_ofs + static_cast<u32>(name.size());
child->path = parent->path + "/" + name;
if (ext_dir != nullptr && ext_dir->GetFile(name + ".stub") != nullptr) {
continue;
}
// Sanity check on path_len
ASSERT(child->path_len < FS_MAX_PATH);
if (!AddDirectory(parent, child)) {
continue;
}
auto child_ext_dir = ext_dir != nullptr ? ext_dir->GetSubdirectory(name) : nullptr;
this->VisitDirectory(child_romfs_dir, child_ext_dir, child);
}
}
@@ -293,7 +287,7 @@ std::multimap<u64, VirtualFile> RomFSBuildContext::Build() {
cur_entry.name_size = name_size;
out.emplace(cur_file->offset + ROMFS_FILEPARTITION_OFS, cur_file->source);
out.emplace(cur_file->offset + ROMFS_FILEPARTITION_OFS, std::move(cur_file->source));
std::memcpy(file_table.data() + cur_file->entry_offset, &cur_entry, sizeof(RomFSFileEntry));
std::memset(file_table.data() + cur_file->entry_offset + sizeof(RomFSFileEntry), 0,
Common::AlignUp(cur_entry.name_size, 4));

View File

@@ -377,16 +377,16 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
auto romfs_dir = FindSubdirectoryCaseless(subdir, "romfs");
if (romfs_dir != nullptr)
layers.push_back(std::make_shared<CachedVfsDirectory>(romfs_dir));
layers.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(romfs_dir)));
auto ext_dir = FindSubdirectoryCaseless(subdir, "romfs_ext");
if (ext_dir != nullptr)
layers_ext.push_back(std::make_shared<CachedVfsDirectory>(ext_dir));
layers_ext.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(ext_dir)));
if (type == ContentRecordType::HtmlDocument) {
auto manual_dir = FindSubdirectoryCaseless(subdir, "manual_html");
if (manual_dir != nullptr)
layers.push_back(std::make_shared<CachedVfsDirectory>(manual_dir));
layers.emplace_back(std::make_shared<CachedVfsDirectory>(std::move(manual_dir)));
}
}
@@ -400,7 +400,7 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
return;
}
layers.push_back(std::move(extracted));
layers.emplace_back(std::move(extracted));
auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
if (layered == nullptr) {

View File

@@ -104,16 +104,16 @@ Loader::ResultStatus ProgramMetadata::Reload(VirtualFile file) {
}
/*static*/ ProgramMetadata ProgramMetadata::GetDefault() {
// Allow use of cores 0~3 and thread priorities 1~63.
constexpr u32 default_thread_info_capability = 0x30007F7;
// Allow use of cores 0~3 and thread priorities 16~63.
constexpr u32 default_thread_info_capability = 0x30043F7;
ProgramMetadata result;
result.LoadManual(
true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/,
0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x00100000 /*main_thread_stack_size*/,
0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/,
0x1FE00000 /*system_resource_size*/, {default_thread_info_capability} /*capabilities*/);
0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x100000 /*main_thread_stack_size*/,
0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, 0 /*system_resource_size*/,
{default_thread_info_capability} /*capabilities*/);
return result;
}
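
The constant change tracks the updated comment: in the Horizon ThreadInfo capability the low four bits are the 0b0111 tag, bits 4..9 hold the lowest (numerically largest) thread priority, bits 10..15 the highest, and bits 16..23 / 24..31 the minimum and maximum core. Decoding 0x30043F7 under that layout (a sketch; the field offsets are assumed from the standard capability encoding, not taken from this diff):

#include <cstdint>

// ThreadInfo capability: 0b0111 tag in bits 0..3, then 6-bit priority
// bounds and 8-bit core bounds (offsets assumed as described above).
constexpr std::uint32_t cap = 0x30043F7;

static_assert((cap & 0xF) == 0b0111);      // capability tag
static_assert(((cap >> 4) & 0x3F) == 63);  // lowest priority (numerically largest)
static_assert(((cap >> 10) & 0x3F) == 16); // highest priority
static_assert(((cap >> 16) & 0xFF) == 0);  // minimum core
static_assert(((cap >> 24) & 0xFF) == 3);  // maximum core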

View File

@@ -73,6 +73,9 @@ public:
u64 GetFilesystemPermissions() const;
u32 GetSystemResourceSize() const;
const KernelCapabilityDescriptors& GetKernelCapabilities() const;
const std::array<u8, 0x10>& GetName() const {
return npdm_header.application_name;
}
void Print() const;
@@ -164,14 +167,14 @@ private:
u32_le unk_size_2;
};
Header npdm_header;
AciHeader aci_header;
AcidHeader acid_header;
Header npdm_header{};
AciHeader aci_header{};
AcidHeader acid_header{};
FileAccessControl acid_file_access;
FileAccessHeader aci_file_access;
FileAccessControl acid_file_access{};
FileAccessHeader aci_file_access{};
KernelCapabilityDescriptors aci_kernel_capabilities;
KernelCapabilityDescriptors aci_kernel_capabilities{};
};
} // namespace FileSys

View File

@@ -322,7 +322,8 @@ VirtualFile RegisteredCache::OpenFileOrDirectoryConcat(const VirtualDir& open_di
return nullptr;
}
return ConcatenatedVfsFile::MakeConcatenatedFile(concat, concat.front()->GetName());
auto name = concat.front()->GetName();
return ConcatenatedVfsFile::MakeConcatenatedFile(std::move(name), std::move(concat));
}
VirtualFile RegisteredCache::GetFileAtID(NcaID id) const {

View File

@@ -133,7 +133,7 @@ VirtualDir ExtractRomFS(VirtualFile file, RomFSExtractionType type) {
out = out->GetSubdirectories().front();
}
return std::make_shared<CachedVfsDirectory>(out);
return std::make_shared<CachedVfsDirectory>(std::move(out));
}
VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
@@ -141,8 +141,7 @@ VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
return nullptr;
RomFSBuildContext ctx{dir, ext};
auto file_map = ctx.Build();
return ConcatenatedVfsFile::MakeConcatenatedFile(0, file_map, dir->GetName());
return ConcatenatedVfsFile::MakeConcatenatedFile(0, dir->GetName(), ctx.Build());
}
} // namespace FileSys

View File

@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/logging/log.h"
#include "core/file_sys/system_archive/system_version.h"
#include "core/file_sys/vfs_vector.h"
#include "core/hle/api_version.h"
@@ -12,6 +13,9 @@ std::string GetLongDisplayVersion() {
}
VirtualDir SystemVersion() {
LOG_WARNING(Common_Filesystem, "called - Using hardcoded firmware version '{}'",
GetLongDisplayVersion());
VirtualFile file = std::make_shared<VectorVfsFile>(std::vector<u8>(0x100), "file");
file->WriteObject(HLE::ApiVersion::HOS_VERSION_MAJOR, 0);
file->WriteObject(HLE::ApiVersion::HOS_VERSION_MINOR, 1);

View File

@@ -6,13 +6,13 @@
namespace FileSys {
CachedVfsDirectory::CachedVfsDirectory(VirtualDir& source_dir)
CachedVfsDirectory::CachedVfsDirectory(VirtualDir&& source_dir)
: name(source_dir->GetName()), parent(source_dir->GetParentDirectory()) {
for (auto& dir : source_dir->GetSubdirectories()) {
dirs.emplace(dir->GetName(), std::make_shared<CachedVfsDirectory>(dir));
dirs.emplace(dir->GetName(), std::make_shared<CachedVfsDirectory>(std::move(dir)));
}
for (auto& file : source_dir->GetFiles()) {
files.emplace(file->GetName(), file);
files.emplace(file->GetName(), std::move(file));
}
}

View File

@@ -11,7 +11,7 @@ namespace FileSys {
class CachedVfsDirectory : public ReadOnlyVfsDirectory {
public:
CachedVfsDirectory(VirtualDir& source_directory);
CachedVfsDirectory(VirtualDir&& source_directory);
~CachedVfsDirectory() override;
VirtualFile GetFile(std::string_view file_name) const override;

View File

@@ -10,7 +10,7 @@
namespace FileSys {
ConcatenatedVfsFile::ConcatenatedVfsFile(ConcatenationMap&& concatenation_map_, std::string&& name_)
ConcatenatedVfsFile::ConcatenatedVfsFile(std::string&& name_, ConcatenationMap&& concatenation_map_)
: concatenation_map(std::move(concatenation_map_)), name(std::move(name_)) {
DEBUG_ASSERT(this->VerifyContinuity());
}
@@ -30,8 +30,8 @@ bool ConcatenatedVfsFile::VerifyContinuity() const {
ConcatenatedVfsFile::~ConcatenatedVfsFile() = default;
VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(const std::vector<VirtualFile>& files,
std::string&& name) {
VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(std::string&& name,
std::vector<VirtualFile>&& files) {
// Fold trivial cases.
if (files.empty()) {
return nullptr;
@@ -46,20 +46,21 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(const std::vector<VirtualF
u64 last_offset = 0;
for (auto& file : files) {
const auto size = file->GetSize();
concatenation_map.emplace_back(ConcatenationEntry{
.offset = last_offset,
.file = file,
.file = std::move(file),
});
last_offset += file->GetSize();
last_offset += size;
}
return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name)));
return VirtualFile(new ConcatenatedVfsFile(std::move(name), std::move(concatenation_map)));
}
VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
const std::multimap<u64, VirtualFile>& files,
std::string&& name) {
VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte, std::string&& name,
std::multimap<u64, VirtualFile>&& files) {
// Fold trivial cases.
if (files.empty()) {
return nullptr;
@@ -76,6 +77,8 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
// Iteration of a multimap is ordered, so offset will be strictly non-decreasing.
for (auto& [offset, file] : files) {
const auto size = file->GetSize();
if (offset > last_offset) {
concatenation_map.emplace_back(ConcatenationEntry{
.offset = last_offset,
@@ -85,13 +88,13 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
concatenation_map.emplace_back(ConcatenationEntry{
.offset = offset,
.file = file,
.file = std::move(file),
});
last_offset = offset + file->GetSize();
last_offset = offset + size;
}
return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name)));
return VirtualFile(new ConcatenatedVfsFile(std::move(name), std::move(concatenation_map)));
}
std::string ConcatenatedVfsFile::GetName() const {

View File

@@ -24,22 +24,20 @@ private:
};
using ConcatenationMap = std::vector<ConcatenationEntry>;
explicit ConcatenatedVfsFile(std::vector<ConcatenationEntry>&& concatenation_map,
std::string&& name);
explicit ConcatenatedVfsFile(std::string&& name,
std::vector<ConcatenationEntry>&& concatenation_map);
bool VerifyContinuity() const;
public:
~ConcatenatedVfsFile() override;
/// Wrapper function to allow for more efficient handling of files.size() == 0, 1 cases.
static VirtualFile MakeConcatenatedFile(const std::vector<VirtualFile>& files,
std::string&& name);
static VirtualFile MakeConcatenatedFile(std::string&& name, std::vector<VirtualFile>&& files);
/// Convenience function that turns a map of offsets to files into a concatenated file, filling
/// gaps with a given filler byte.
static VirtualFile MakeConcatenatedFile(u8 filler_byte,
const std::multimap<u64, VirtualFile>& files,
std::string&& name);
static VirtualFile MakeConcatenatedFile(u8 filler_byte, std::string&& name,
std::multimap<u64, VirtualFile>&& files);
std::string GetName() const override;
std::size_t GetSize() const override;
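
The signature changes across these vfs files share one goal (PR #11774, "avoid unnecessary copies of vfs pointers"): VirtualFile and VirtualDir are shared_ptr aliases, and every copy of one costs an atomic reference-count increment plus a later decrement, so the factories now take their containers by rvalue and move the elements into place. A reduced sketch of the pattern with simplified stand-in types (not the project's actual classes):

#include <memory>
#include <string>
#include <utility>
#include <vector>

struct File {
    std::string name;
};
using FilePtr = std::shared_ptr<File>; // stand-in for FileSys::VirtualFile

// Taking the vector by value and moving each element transfers ownership
// without any per-element atomic reference-count traffic.
std::vector<FilePtr> Adopt(std::vector<FilePtr> files) {
    std::vector<FilePtr> out;
    out.reserve(files.size());
    for (auto& f : files) {
        out.emplace_back(std::move(f)); // moves the shared_ptr, no ref-count bump
    }
    return out;
}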

View File

@@ -38,7 +38,7 @@ VirtualDir LayeredVfsDirectory::GetDirectoryRelative(std::string_view path) cons
for (const auto& layer : dirs) {
auto dir = layer->GetDirectoryRelative(path);
if (dir != nullptr) {
out.push_back(std::move(dir));
out.emplace_back(std::move(dir));
}
}
@@ -62,11 +62,11 @@ std::vector<VirtualFile> LayeredVfsDirectory::GetFiles() const {
std::set<std::string, std::less<>> out_names;
for (const auto& layer : dirs) {
for (const auto& file : layer->GetFiles()) {
for (auto& file : layer->GetFiles()) {
auto file_name = file->GetName();
if (!out_names.contains(file_name)) {
out_names.emplace(std::move(file_name));
out.push_back(file);
out.emplace_back(std::move(file));
}
}
}
@@ -86,7 +86,7 @@ std::vector<VirtualDir> LayeredVfsDirectory::GetSubdirectories() const {
std::vector<VirtualDir> out;
out.reserve(names.size());
for (const auto& subdir : names)
out.push_back(GetSubdirectory(subdir));
out.emplace_back(GetSubdirectory(subdir));
return out;
}

View File

@@ -8,7 +8,11 @@
#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_trace.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel::Board::Nintendo::Nx {
@@ -30,6 +34,8 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize =
constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal =
RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal;
constexpr const std::size_t SecureAlignment = 128_KiB;
namespace {
using namespace Common::Literals;
@@ -183,4 +189,57 @@ u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
return GenerateUniformRange(min, max, GenerateRandomU64);
}
size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
if (pool == static_cast<u32>(KMemoryManager::Pool::Applet)) {
return 0;
} else {
// return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
return size;
}
}
Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
u32 pool) {
// Applet secure memory is handled separately.
UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
// Ensure the size is aligned.
const size_t alignment =
(pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
R_UNLESS(Common::IsAligned(size, alignment), ResultInvalidSize);
// Allocate the memory.
const size_t num_pages = size / PageSize;
const KPhysicalAddress paddr = kernel.MemoryManager().AllocateAndOpenContinuous(
num_pages, alignment / PageSize,
KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool),
KMemoryManager::Direction::FromFront));
R_UNLESS(paddr != 0, ResultOutOfMemory);
// Ensure we don't leak references to the memory on error.
ON_RESULT_FAILURE {
kernel.MemoryManager().Close(paddr, num_pages);
};
// We succeeded.
*out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
R_SUCCEED();
}
void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
u32 pool) {
// Applet secure memory is handled separately.
UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
// Ensure the size is aligned.
const size_t alignment =
(pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
ASSERT(Common::IsAligned(GetInteger(address), alignment));
ASSERT(Common::IsAligned(size, alignment));
// Close the secure region's pages.
kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
size / PageSize);
}
} // namespace Kernel::Board::Nintendo::Nx

View File

@@ -4,6 +4,11 @@
#pragma once
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/result.h"
namespace Kernel {
class KernelCore;
}
namespace Kernel::Board::Nintendo::Nx {
@@ -25,8 +30,16 @@ public:
static std::size_t GetMinimumNonSecureSystemPoolSize();
};
// Randomness.
static u64 GenerateRandomRange(u64 min, u64 max);
static u64 GenerateRandomU64();
// Secure Memory.
static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
static Result AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
u32 pool);
static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
u32 pool);
};
} // namespace Kernel::Board::Nintendo::Nx

View File

@@ -106,7 +106,7 @@ static_assert(KernelPageBufferAdditionalSize ==
/// memory.
static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
KVirtualAddress slab_addr) {
slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
slab_addr -= memory_layout.GetSlabRegion().GetAddress();
return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
}
@@ -196,7 +196,12 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
// Get the start of the slab region, since that's where we'll be working.
KVirtualAddress address = memory_layout.GetSlabRegionAddress();
const KMemoryRegion& slab_region = memory_layout.GetSlabRegion();
KVirtualAddress address = slab_region.GetAddress();
// Clear the slab region.
// TODO: implement access to kernel VAs.
// std::memset(device_ptr, 0, slab_region.GetSize());
// Initialize slab type array to be in sorted order.
std::array<KSlabType, KSlabType_Count> slab_types;

View File

@@ -19,4 +19,8 @@ static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
MainMemoryAddress);
}
static inline size_t GetInitialProcessBinarySize() {
return InitialProcessBinarySizeMax;
}
} // namespace Kernel

View File

@@ -200,8 +200,8 @@ private:
RawCapabilityValue raw;
BitField<0, 15, CapabilityType> id;
BitField<15, 4, u32> major_version;
BitField<19, 13, u32> minor_version;
BitField<15, 4, u32> minor_version;
BitField<19, 13, u32> major_version;
};
union HandleTable {
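
The fix swaps the two declarations so they match the actual layout: the 4-bit field at offset 15 is the minor version and the 13-bit field at offset 19 is the major version. The same decode with plain shifts instead of the BitField helper (a sketch; the struct and function names are illustrative):

#include <cstdint>

struct KernelVersion {
    std::uint32_t minor; // bits 15..18 (4 bits)
    std::uint32_t major; // bits 19..31 (13 bits)
};

constexpr KernelVersion DecodeKernelVersion(std::uint32_t raw) {
    return KernelVersion{
        .minor = (raw >> 15) & 0xF,
        .major = (raw >> 19) & 0x1FFF,
    };
}

static_assert(DecodeKernelVersion((5u << 19) | (2u << 15)).major == 5);
static_assert(DecodeKernelVersion((5u << 19) | (2u << 15)).minor == 2);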

View File

@@ -107,12 +107,12 @@ KConditionVariable::KConditionVariable(Core::System& system)
KConditionVariable::~KConditionVariable() = default;
Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
Result KConditionVariable::SignalToAddress(KernelCore& kernel, KProcessAddress addr) {
KThread* owner_thread = GetCurrentThreadPointer(kernel);
// Signal the address.
{
KScopedSchedulerLock sl(m_kernel);
KScopedSchedulerLock sl(kernel);
// Remove waiter thread.
bool has_waiters{};
@@ -133,7 +133,7 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
// Write the value to userspace.
Result result{ResultSuccess};
if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] {
if (WriteToUser(kernel, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
result = ResultInvalidCurrentMemory;
@@ -148,28 +148,28 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
}
}
Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
Result KConditionVariable::WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
// Wait for the address.
KThread* owner_thread{};
{
KScopedSchedulerLock sl(m_kernel);
KScopedSchedulerLock sl(kernel);
// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
// Read the tag from userspace.
u32 test_tag{};
R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr),
ResultInvalidCurrentMemory);
R_UNLESS(ReadFromUser(kernel, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
// If the tag isn't the handle (with wait mask), we're done.
R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
// Get the lock owner thread.
owner_thread = GetCurrentProcess(m_kernel)
owner_thread = GetCurrentProcess(kernel)
.GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(handle)
.ReleasePointerUnsafe();

View File

@@ -24,11 +24,12 @@ public:
explicit KConditionVariable(Core::System& system);
~KConditionVariable();
// Arbitration
Result SignalToAddress(KProcessAddress addr);
Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);
// Arbitration.
static Result SignalToAddress(KernelCore& kernel, KProcessAddress addr);
static Result WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
u32 value);
// Condition variable
// Condition variable.
void Signal(u64 cv_key, s32 count);
Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);

View File

@@ -22,7 +22,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
KScopedSchedulerLock sl{kernel};
// Pin the current thread.
process->PinCurrentThread(core_id);
process->PinCurrentThread();
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();

View File

@@ -36,6 +36,7 @@ enum class KMemoryState : u32 {
FlagCanChangeAttribute = (1 << 24),
FlagCanCodeMemory = (1 << 25),
FlagLinearMapped = (1 << 26),
FlagCanPermissionLock = (1 << 27),
FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
@@ -50,12 +51,16 @@ enum class KMemoryState : u32 {
FlagLinearMapped,
Free = static_cast<u32>(Svc::MemoryState::Free),
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
IoMemory = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
IoRegister =
static_cast<u32>(Svc::MemoryState::Io) | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
FlagCanCodeMemory,
FlagCanCodeMemory | FlagCanPermissionLock,
Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,
@@ -65,7 +70,8 @@ enum class KMemoryState : u32 {
AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
FlagCanCodeAlias,
AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,
FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory |
FlagCanPermissionLock,
Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -73,7 +79,7 @@ enum class KMemoryState : u32 {
Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagLinearMapped,
Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
@@ -94,7 +100,7 @@ enum class KMemoryState : u32 {
NonDeviceIpc =
static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,
Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
Kernel = static_cast<u32>(Svc::MemoryState::Kernel),
GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
@@ -105,34 +111,36 @@ enum class KMemoryState : u32 {
Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
FlagCanAlignedDeviceMap | FlagCanQueryPhysical | FlagCanUseNonSecureIpc |
FlagCanUseNonDeviceIpc,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::IoMemory) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::IoRegister) == 0x00180001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x0FFEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x0FFFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400000C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00000013);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
-static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
+static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x055C3817);
enum class KMemoryPermission : u8 {
None = 0,
@@ -182,8 +190,9 @@ enum class KMemoryAttribute : u8 {
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
+    PermissionLocked = static_cast<u8>(Svc::MemoryAttribute::PermissionLocked),
-    SetMask = Uncached,
+    SetMask = Uncached | PermissionLocked,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
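The constants above pack two things into one u32: the low byte is the Svc-visible MemoryState id (what GetSvcState() extracts with KMemoryState::Mask), and the higher bits are capability flags. A minimal standalone sketch of that decomposition, assuming a low-byte mask and illustrative flag positions (not yuzu's real values):

    #include <cassert>
    #include <cstdint>

    // Toy model: low byte = Svc::MemoryState id, higher bits = capability flags.
    // Flag positions here are illustrative, not the real KMemoryState bits.
    constexpr uint32_t SvcStateMask     = 0xFF;
    constexpr uint32_t FlagMapped       = 1u << 12;
    constexpr uint32_t FlagLinearMapped = 1u << 13;

    // ThreadLocal after this change: still linear-mapped, no longer FlagMapped.
    constexpr uint32_t ThreadLocal = 0x0C | FlagLinearMapped;

    int main() {
        // GetSvcState() equivalent: strip the flag bits to recover the Svc id.
        assert((ThreadLocal & SvcStateMask) == 0x0C);
        assert((ThreadLocal & FlagLinearMapped) != 0);
        assert((ThreadLocal & FlagMapped) == 0);
    }

This is consistent with the asserts above: 0x0400000C & 0xFF recovers state id 0x0C.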
@@ -261,6 +270,10 @@ struct KMemoryInfo {
return m_state;
}
constexpr Svc::MemoryState GetSvcState() const {
return static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask);
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
@@ -326,6 +339,10 @@ public:
return this->GetEndAddress() - 1;
}
constexpr KMemoryState GetState() const {
return m_memory_state;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
@@ -443,6 +460,13 @@ public:
}
}
constexpr void UpdateAttribute(KMemoryAttribute mask, KMemoryAttribute attr) {
ASSERT(False(mask & KMemoryAttribute::IpcLocked));
ASSERT(False(mask & KMemoryAttribute::DeviceShared));
m_attribute = (m_attribute & ~mask) | attr;
}
constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));


@@ -160,8 +160,8 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
}
// Update block state.
-    it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
-               static_cast<u8>(clear_disable_attr));
+    it->Update(state, perm, attr, it->GetAddress() == address,
+               static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
}
@@ -175,7 +175,9 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
KProcessAddress address, size_t num_pages,
KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state,
-                                       KMemoryPermission perm, KMemoryAttribute attr) {
+                                       KMemoryPermission perm, KMemoryAttribute attr,
+                                       KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                       KMemoryBlockDisableMergeAttribute clear_disable_attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
@@ -214,7 +216,8 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allo
}
// Update block state.
-            it->Update(state, perm, attr, false, 0, 0);
+            it->Update(state, perm, attr, false, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
@@ -284,6 +287,65 @@ void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocat
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator,
KProcessAddress address, size_t num_pages,
KMemoryAttribute mask, KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(GetInteger(address), PageSize));
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if ((it->GetAttribute() & mask) != attr) {
// If we need to, create a new block before and insert it.
if (cur_info.GetAddress() != GetInteger(cur_address)) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
// If we need to, create a new block after and insert it.
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
// Update block state.
it->UpdateAttribute(mask, attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
// If we already have the right attributes, just advance.
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages =
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
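UpdateAttribute above follows the block manager's standard walk: find the block containing the cursor, split off a leading piece if the range starts mid-block, split off a trailing piece if it ends mid-block, update the middle, then coalesce. A simplified, self-contained sketch of that split/update walk over a toy start-address map (not yuzu's intrusive tree):

    #include <cstdint>
    #include <iterator>
    #include <map>

    // Toy block map: start address -> block. Not yuzu's data structures.
    struct Block {
        uint64_t size;
        uint32_t attr;
    };
    using BlockMap = std::map<uint64_t, Block>;

    // Apply `attr` under `mask` to [addr, addr + size), splitting blocks at the
    // range boundaries so untouched pages keep their old attributes.
    void UpdateAttribute(BlockMap& blocks, uint64_t addr, uint64_t size,
                         uint32_t mask, uint32_t attr) {
        uint64_t cur = addr;
        const uint64_t end = addr + size;
        while (cur < end) {
            auto it = std::prev(blocks.upper_bound(cur));
            // Split before the range start, like it->Split(new_block, cur_address).
            if (it->first < cur) {
                Block right{it->first + it->second.size - cur, it->second.attr};
                it->second.size = cur - it->first;
                it = blocks.emplace(cur, right).first;
            }
            // Split after the range end.
            if (it->first + it->second.size > end) {
                Block right{it->first + it->second.size - end, it->second.attr};
                it->second.size = end - it->first;
                blocks.emplace(end, right);
            }
            it->second.attr = (it->second.attr & ~mask) | (attr & mask);
            cur = it->first + it->second.size;
        }
        // A real implementation would now merge equal neighbors again
        // (CoalesceForUpdate in the code above).
    }

    int main() {
        BlockMap blocks{{0, {0x10000, 0}}};
        UpdateAttribute(blocks, 0x4000, 0x2000, 0xFF, 0x11);
        // [0, 0x4000), [0x4000, 0x6000), [0x6000, 0x10000)
        return blocks.size() == 3 ? 0 : 1;
    }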
// Debug.
bool KMemoryBlockManager::CheckState() const {
// Loop over every block, ensuring that we are sorted and coalesced.


@@ -115,7 +115,11 @@ public:
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
-                       KMemoryAttribute attr);
+                       KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr,
+                       KMemoryBlockDisableMergeAttribute clear_disable_attr);
void UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
size_t num_pages, KMemoryAttribute mask, KMemoryAttribute attr);
iterator FindIterator(KProcessAddress address) const {
return m_memory_block_tree.find(KMemoryBlock(


@@ -137,11 +137,9 @@ public:
return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
}
-    KVirtualAddress GetSlabRegionAddress() const {
-        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
-            .GetAddress();
+    const KMemoryRegion& GetSlabRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab));
}
const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
}


@@ -11,6 +11,7 @@
#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
@@ -119,7 +120,8 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
// Free each region to its corresponding heap.
size_t reserved_sizes[MaxManagerCount] = {};
const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
-    const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
+    const size_t ini_size = GetInitialProcessBinarySize();
+    const KPhysicalAddress ini_end = ini_start + ini_size;
const KPhysicalAddress ini_last = ini_end - 1;
for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@@ -137,13 +139,13 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
}
// Open/reserve the ini memory.
-                manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
-                reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
+                manager.OpenFirst(ini_start, ini_size / PageSize);
+                reserved_sizes[it.GetAttributes()] += ini_size;
// Free memory after the ini to the heap.
if (ini_last != cur_last) {
ASSERT(cur_end != 0);
-                    manager.Free(ini_end, cur_end - ini_end);
+                    manager.Free(ini_end, (cur_end - ini_end) / PageSize);
}
} else {
// Ensure there's no partial overlap with the ini image.
@@ -167,11 +169,37 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
}
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
-    UNREACHABLE();
const u32 pool_index = static_cast<u32>(pool);
// Lock the pool.
KScopedLightLock lk(m_pool_locks[pool_index]);
// Check that we don't already have an optimized process.
R_UNLESS(!m_has_optimized_process[pool_index], ResultBusy);
// Set the optimized process id.
m_optimized_process_ids[pool_index] = process_id;
m_has_optimized_process[pool_index] = true;
// Clear the management area for the optimized process.
for (auto* manager = this->GetFirstManager(pool, Direction::FromFront); manager != nullptr;
manager = this->GetNextManager(manager, Direction::FromFront)) {
manager->InitializeOptimizedMemory(m_system.Kernel());
}
R_SUCCEED();
}
void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
-    UNREACHABLE();
const u32 pool_index = static_cast<u32>(pool);
// Lock the pool.
KScopedLightLock lk(m_pool_locks[pool_index]);
// If the process was optimized, clear it.
if (m_has_optimized_process[pool_index] && m_optimized_process_ids[pool_index] == process_id) {
m_has_optimized_process[pool_index] = false;
}
}
KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
@@ -206,7 +234,7 @@ KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, siz
// Maintain the optimized memory bitmap, if we should.
if (m_has_optimized_process[static_cast<size_t>(pool)]) {
-        UNIMPLEMENTED();
+        chosen_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block, num_pages);
}
// Open the first reference to the pages.
@@ -254,7 +282,8 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
// Maintain the optimized memory bitmap, if we should.
if (unoptimized) {
-            UNIMPLEMENTED();
+            cur_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block,
+                                                    pages_per_alloc);
}
num_pages -= pages_per_alloc;
@@ -357,8 +386,8 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Process part or all of the block.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
-                    any_new =
-                        manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
+                    any_new = manager.ProcessOptimizedAllocation(m_system.Kernel(), cur_address,
+                                                                 cur_pages, fill_pattern);
// Advance.
cur_address += cur_pages * PageSize;
@@ -381,7 +410,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
// Track some or all of the current pages.
const size_t cur_pages =
std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
-                manager.TrackOptimizedAllocation(cur_address, cur_pages);
+                manager.TrackOptimizedAllocation(m_system.Kernel(), cur_address, cur_pages);
// Advance.
cur_address += cur_pages * PageSize;
@@ -426,17 +455,86 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
return total_management_size;
}
-void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
-    UNREACHABLE();
+void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
auto optimize_pa =
KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
}
-void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
-    UNREACHABLE();
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+                                                      size_t num_pages) {
auto optimize_pa =
KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
// Get the range we're tracking.
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;
// Track.
while (offset <= last) {
// Mark the page as not being optimized-allocated.
optimize_map[offset / Common::BitSize<u64>()] &=
~(u64(1) << (offset % Common::BitSize<u64>()));
offset++;
}
}
-bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages,
-                                                      u8 fill_pattern) {
-    UNREACHABLE();
+void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+                                                    size_t num_pages) {
auto optimize_pa =
KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
// Get the range we're tracking.
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;
// Track.
while (offset <= last) {
// Mark the page as being optimized-allocated.
optimize_map[offset / Common::BitSize<u64>()] |=
(u64(1) << (offset % Common::BitSize<u64>()));
offset++;
}
}
bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
size_t num_pages, u8 fill_pattern) {
auto& device_memory = kernel.System().DeviceMemory();
auto optimize_pa =
KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
// We want to return whether any pages were newly allocated.
bool any_new = false;
// Get the range we're processing.
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;
// Process.
while (offset <= last) {
// Check if the page has been optimized-allocated before.
if ((optimize_map[offset / Common::BitSize<u64>()] &
(u64(1) << (offset % Common::BitSize<u64>()))) == 0) {
// If not, it's new.
any_new = true;
// Fill the page.
auto* ptr = device_memory.GetPointer<u8>(m_heap.GetAddress());
std::memset(ptr + offset * PageSize, fill_pattern, PageSize);
}
offset++;
}
// Return the number of pages we processed.
return any_new;
}
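The three functions above all address the same bitmap: page i lives at bit (i % 64) of word (i / 64). A standalone version of the set/clear loop, runnable in isolation:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Standalone version of the bit loop used by TrackOptimizedAllocation /
    // TrackUnoptimizedAllocation above.
    void SetRange(std::vector<uint64_t>& map, size_t first, size_t count, bool value) {
        for (size_t off = first; off < first + count; ++off) {
            const uint64_t bit = uint64_t(1) << (off % 64);
            if (value) {
                map[off / 64] |= bit;   // mark page as optimized-allocated
            } else {
                map[off / 64] &= ~bit;  // mark page as not optimized-allocated
            }
        }
    }

    int main() {
        std::vector<uint64_t> map(4, 0);
        SetRange(map, 60, 8, true);   // straddles the word boundary at bit 64
        assert(map[0] == (uint64_t(0xF) << 60));
        assert(map[1] == 0xF);
        SetRange(map, 60, 8, false);
        assert(map[0] == 0 && map[1] == 0);
    }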
size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {


@@ -216,14 +216,14 @@ private:
m_heap.SetInitialUsedSize(reserved_size);
}
-    void InitializeOptimizedMemory() {
-        UNIMPLEMENTED();
-    }
+    void InitializeOptimizedMemory(KernelCore& kernel);
-    void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
-    void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
+    void TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+                                    size_t num_pages);
+    void TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages);
-    bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
+    bool ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
+                                    size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const {
return m_pool;


@@ -190,9 +190,15 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
KMemoryRegionAttr_LinearMapped);
constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown =
KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(
KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
(0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));
static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() ==
(0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_LinearMapped));
constexpr inline auto KMemoryRegionType_DramReservedEarly =
KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
@@ -217,16 +223,18 @@ constexpr inline auto KMemoryRegionType_DramPoolPartition =
static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
(0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
-constexpr inline auto KMemoryRegionType_DramPoolManagement =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
+// UNUSED: .Derive(4, 1);
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_DramPoolManagement =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(
        KMemoryRegionAttr_CarveoutProtected);
-constexpr inline auto KMemoryRegionType_DramUserPool =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
+constexpr inline const auto KMemoryRegionType_DramUserPool =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 3);
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
-              (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
+              (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
-              (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline auto KMemoryRegionType_DramApplicationPool =
KMemoryRegionType_DramUserPool.Derive(4, 0);
@@ -237,60 +245,63 @@ constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
constexpr inline auto KMemoryRegionType_DramSystemPool =
KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
-              (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
-              (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
-              (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
-              (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
+              (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
KMemoryRegionAttr_CarveoutProtected));
constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
-// UNUSED: .DeriveSparse(2, 2, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
-    KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
-static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
-constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
-    KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
-static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x32));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown.GetValue() == (0x92));
-constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
-static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
-static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
-static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
+// UNUSED: .Derive(4, 3);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramUserPool =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
+static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x31A);
+static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
+static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x61A);
// NOTE: For unknown reason, the pools are derived out-of-order here.
// It's worth eventually trying to understand why Nintendo made this choice.
-// UNUSED: .Derive(6, 0);
-// UNUSED: .Derive(6, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
-constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
-static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
-static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
-static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
-static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
+constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x361A);
+static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x561A);
+static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
+static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x961A);
constexpr inline auto KMemoryRegionType_ArchDeviceBase =
KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
@@ -354,12 +365,14 @@ constexpr inline auto KMemoryRegionType_KernelTemp =
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
-    if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
-        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
-    } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
+    if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
} else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
+    } else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelSecureUnknown;
+    } else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
return KMemoryRegionType_VirtualDramUnknownDebug;
} else {


@@ -183,12 +183,17 @@ private:
class KScopedPageGroup {
public:
-    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+    explicit KScopedPageGroup(const KPageGroup* gp, bool not_first = true) : m_pg(gp) {
if (m_pg) {
-            m_pg->Open();
+            if (not_first) {
+                m_pg->Open();
+            } else {
+                m_pg->OpenFirst();
+            }
}
}
-    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    explicit KScopedPageGroup(const KPageGroup& gp, bool not_first = true)
+        : KScopedPageGroup(std::addressof(gp), not_first) {}
~KScopedPageGroup() {
if (m_pg) {
m_pg->Close();
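The new not_first parameter exists because freshly allocated pages must receive their very first reference (OpenFirst) rather than an additional one (Open), while the destructor closes the reference either way, including on early error returns. A toy model of the same RAII shape (RefCounted and ScopedRef are stand-ins, not yuzu types):

    #include <cassert>

    struct RefCounted {
        int refs = 0;
        void OpenFirst() { assert(refs == 0); refs = 1; }
        void Open()      { assert(refs > 0);  ++refs; }
        void Close()     { --refs; }
    };

    class ScopedRef {
    public:
        explicit ScopedRef(RefCounted* obj, bool not_first = true) : m_obj(obj) {
            if (m_obj) {
                if (not_first) {
                    m_obj->Open();       // additional reference
                } else {
                    m_obj->OpenFirst();  // very first reference to fresh pages
                }
            }
        }
        ~ScopedRef() {
            if (m_obj) {
                m_obj->Close();          // released on any scope exit
            }
        }

    private:
        RefCounted* m_obj;
    };

    int main() {
        RefCounted pages;
        {
            ScopedRef scope(&pages, /*not_first=*/false);
            assert(pages.refs == 1);
        }
        assert(pages.refs == 0);
    }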


@@ -82,14 +82,14 @@ public:
using namespace Common::Literals;
-constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
+constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) {
    switch (as_type) {
-    case FileSys::ProgramAddressSpaceType::Is32Bit:
-    case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
+    case Svc::CreateProcessFlag::AddressSpace32Bit:
+    case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
        return 32;
-    case FileSys::ProgramAddressSpaceType::Is36Bit:
+    case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
        return 36;
-    case FileSys::ProgramAddressSpaceType::Is39Bit:
+    case Svc::CreateProcessFlag::AddressSpace64Bit:
        return 39;
default:
ASSERT(false);
@@ -105,7 +105,7 @@ KPageTable::KPageTable(Core::System& system_)
KPageTable::~KPageTable() = default;
-Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
bool enable_das_merge, bool from_back,
KMemoryManager::Pool pool, KProcessAddress code_addr,
size_t code_size, KSystemResource* system_resource,
@@ -133,7 +133,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
ASSERT(code_addr + code_size - 1 <= end - 1);
// Adjust heap/alias size if we don't have an alias region
-    if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
+    if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
heap_region_size += alias_region_size;
alias_region_size = 0;
}
@@ -505,7 +505,7 @@ Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress
R_TRY(this->CheckMemoryStateContiguous(
std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::All, KMemoryAttribute::None));
+        KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
// Determine whether any pages being unmapped are code.
bool any_code_pages = false;
@@ -1724,29 +1724,43 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
PageSize;
// While we have pages to map, map them.
-        while (map_pages > 0) {
-            // Check if we're at the end of the physical block.
-            if (pg_pages == 0) {
-                // Ensure there are more pages to map.
-                ASSERT(pg_it != pg.end());
-                // Advance our physical block.
-                ++pg_it;
-                pg_phys_addr = pg_it->GetAddress();
-                pg_pages = pg_it->GetNumPages();
-            }
-            // Map whatever we can.
-            const size_t cur_pages = std::min(pg_pages, map_pages);
-            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                          OperationType::MapFirst, pg_phys_addr));
-            // Advance.
-            cur_address += cur_pages * PageSize;
-            map_pages -= cur_pages;
-            pg_phys_addr += cur_pages * PageSize;
-            pg_pages -= cur_pages;
-        }
+        {
+            // Create a page group for the current mapping range.
+            KPageGroup cur_pg(m_kernel, m_block_info_manager);
+            {
+                ON_RESULT_FAILURE_2 {
+                    cur_pg.OpenFirst();
+                    cur_pg.Close();
+                };
+                size_t remain_pages = map_pages;
+                while (remain_pages > 0) {
+                    // Check if we're at the end of the physical block.
+                    if (pg_pages == 0) {
+                        // Ensure there are more pages to map.
+                        ASSERT(pg_it != pg.end());
+                        // Advance our physical block.
+                        ++pg_it;
+                        pg_phys_addr = pg_it->GetAddress();
+                        pg_pages = pg_it->GetNumPages();
+                    }
+                    // Add whatever we can to the current block.
+                    const size_t cur_pages = std::min(pg_pages, remain_pages);
+                    R_TRY(cur_pg.AddBlock(pg_phys_addr + ((pg_pages - cur_pages) * PageSize),
+                                          cur_pages));
+                    // Advance.
+                    remain_pages -= cur_pages;
+                    pg_pages -= cur_pages;
+                }
+            }
+            // Map the pages.
+            R_TRY(this->Operate(cur_address, map_pages, cur_pg, OperationType::MapFirstGroup));
+        }
}
}
@@ -1770,7 +1784,11 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
m_memory_block_manager.UpdateIfMatch(
std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
-        KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+        address == this->GetAliasRegionStart()
+            ? KMemoryBlockDisableMergeAttribute::Normal
+            : KMemoryBlockDisableMergeAttribute::None,
+        KMemoryBlockDisableMergeAttribute::None);
R_SUCCEED();
}
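Instead of issuing one MapFirst operation per contiguous physical run, the rewritten loop above gathers the runs into a KPageGroup and maps the whole group with a single MapFirstGroup operation, letting ON_RESULT_FAILURE_2 centralize cleanup. A sketch of that gather-then-map shape (Run and GatherGroup are illustrative stand-ins; the real code consumes blocks from the tail of each run):

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using Run = std::pair<uint64_t, size_t>;  // physical address, page count

    // Consume (addr, pages) runs from the input into one group, instead of
    // issuing one map operation per run. Assumes input holds enough pages.
    std::vector<Run> GatherGroup(const std::vector<Run>& input, size_t want_pages) {
        std::vector<Run> group;
        size_t it = 0;
        uint64_t addr = 0;
        size_t avail = 0;
        while (want_pages > 0) {
            if (avail == 0) {                 // advance to the next physical block
                addr = input[it].first;
                avail = input[it].second;
                ++it;
            }
            const size_t take = std::min(avail, want_pages);
            group.emplace_back(addr, take);   // cur_pg.AddBlock(...) in the real code
            addr += take * 0x1000;
            avail -= take;
            want_pages -= take;
        }
        return group;                         // mapped with one MapFirstGroup operation
    }

    int main() {
        const auto g = GatherGroup({{0x10000, 4}, {0x90000, 8}}, 6);
        return (g.size() == 2 && g[1].second == 2) ? 0 : 1;
    }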
@@ -1868,6 +1886,13 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
// Iterate over the memory, unmapping as we go.
auto it = m_memory_block_manager.FindIterator(cur_address);
const auto clear_merge_attr =
(it->GetState() == KMemoryState::Normal &&
it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
? KMemoryBlockDisableMergeAttribute::Normal
: KMemoryBlockDisableMergeAttribute::None;
while (true) {
// Check that the iterator is valid.
ASSERT(it != m_memory_block_manager.end());
@@ -1905,7 +1930,7 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+                                  clear_merge_attr);
// We succeeded.
R_SUCCEED();
@@ -2379,8 +2404,7 @@ Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
KScopedPageTableUpdater updater(this);
// Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
-                                        DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
// Update the blocks.
@@ -2422,8 +2446,7 @@ Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMem
KScopedPageTableUpdater updater(this);
// Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
-                                        DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
// Update the blocks.
@@ -2652,11 +2675,18 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
size_t num_allocator_blocks;
constexpr auto AttributeTestMask =
~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
-    R_TRY(this->CheckMemoryState(
-        std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
-        std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute,
-        KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-        AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+    const KMemoryState state_test_mask =
+        static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
+                                       ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
+                                       : 0) |
+                                  ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
+                                       ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
+                                       : 0));
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_test_mask, state_test_mask,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
// Create an update allocator.
Result allocator_result{ResultSuccess};
@@ -2664,18 +2694,17 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
-    // Determine the new attribute.
-    const KMemoryAttribute new_attr =
-        static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
-                                       static_cast<KMemoryAttribute>(attr & mask)));
-    // Perform operation.
-    this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
+    // If we need to, perform a change attribute operation.
+    if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
+        // Perform operation.
+        R_TRY(this->Operate(addr, num_pages, old_perm,
+                            OperationType::ChangePermissionsAndRefreshAndFlush, 0));
+    }
// Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
-                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
+                                           static_cast<KMemoryAttribute>(mask),
+                                           static_cast<KMemoryAttribute>(attr));
R_SUCCEED();
}
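The mask/attr contract used throughout SetMemoryAttribute: only bits selected by mask change, and they take their new value from attr. The same rule in isolation (the bit positions below are small illustrative values, not verified Svc constants):

    #include <cassert>
    #include <cstdint>

    // Only bits selected by `mask` change; they take the value from `attr`.
    constexpr uint32_t ApplyAttribute(uint32_t old_attr, uint32_t mask, uint32_t attr) {
        return (old_attr & ~mask) | (attr & mask);
    }

    int main() {
        constexpr uint32_t Uncached = 1u << 3;          // illustrative positions
        constexpr uint32_t PermissionLocked = 1u << 4;

        uint32_t a = Uncached;
        // Setting PermissionLocked leaves the Uncached bit alone.
        a = ApplyAttribute(a, PermissionLocked, PermissionLocked);
        assert(a == (Uncached | PermissionLocked));
        // Clearing Uncached leaves PermissionLocked alone.
        a = ApplyAttribute(a, Uncached, 0);
        assert(a == PermissionLocked);
    }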
@@ -2863,7 +2892,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress
&KMemoryBlock::ShareToDevice, KMemoryPermission::None);
// Set whether the locked memory was io.
-    *out_is_io = old_state == KMemoryState::Io;
+    *out_is_io =
+        static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
R_SUCCEED();
}
@@ -3021,9 +3051,10 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGr
ASSERT(num_pages == page_group.GetNumPages());
switch (operation) {
-    case OperationType::MapGroup: {
+    case OperationType::MapGroup:
+    case OperationType::MapFirstGroup: {
        // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group);
+        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
for (const auto& node : page_group) {
const size_t size{node.GetNumPages() * PageSize};
@@ -3065,7 +3096,6 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
break;
}
-    case OperationType::MapFirst:
case OperationType::Map: {
ASSERT(map_addr);
ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
@@ -3073,11 +3103,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
// Open references to pages, if we should.
if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            if (operation == OperationType::MapFirst) {
-                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
-            } else {
-                m_kernel.MemoryManager().Open(map_addr, num_pages);
-            }
+            m_kernel.MemoryManager().Open(map_addr, num_pages);
}
break;
}
@@ -3087,6 +3113,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
}
case OperationType::ChangePermissions:
case OperationType::ChangePermissionsAndRefresh:
case OperationType::ChangePermissionsAndRefreshAndFlush:
break;
default:
ASSERT(false);
@@ -3106,79 +3133,79 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
}
}
-KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
+KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
    switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
        return m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
        return m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
        return m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
        return m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
        return m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
        return m_alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
return m_code_region_start;
default:
UNREACHABLE();
}
}
-size_t KPageTable::GetRegionSize(KMemoryState state) const {
+size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
    switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
        return m_address_space_end - m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
        return m_heap_region_end - m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
        return m_alias_region_end - m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
        return m_stack_region_end - m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
        return m_kernel_map_region_end - m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
        return m_alias_code_region_end - m_alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
return m_code_region_end - m_code_region_start;
default:
UNREACHABLE();
}
}
-bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
const KProcessAddress end = addr + size;
const KProcessAddress last = end - 1;
@@ -3192,32 +3219,32 @@ bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState stat
const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
m_alias_region_start == m_alias_region_end);
switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
return is_in_region;
-    case KMemoryState::Io:
-    case KMemoryState::Static:
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Stack:
-    case KMemoryState::ThreadLocal:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Stack:
+    case Svc::MemoryState::ThreadLocal:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
return is_in_region && !is_in_heap && !is_in_alias;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
ASSERT(is_in_heap);
return is_in_region && !is_in_alias;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
ASSERT(is_in_alias);
return is_in_region && !is_in_heap;
default:
@@ -3281,21 +3308,16 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProces
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                                    KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                    KMemoryBlockManager::const_iterator it,
+                                    KProcessAddress last_addr, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
-    const KProcessAddress last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
    KMemoryInfo info = it->GetMemoryInfo();
-    // If the start address isn't aligned, we need a block.
-    const size_t blocks_for_start_align =
-        (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
// Validate all blocks in the range have correct state.
const KMemoryState first_state = info.m_state;
const KMemoryPermission first_perm = info.m_permission;
@@ -3321,10 +3343,6 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
info = it->GetMemoryInfo();
}
-    // If the end address isn't aligned, we need a block.
-    const size_t blocks_for_end_align =
-        (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
// Write output state.
if (out_state != nullptr) {
*out_state = first_state;
@@ -3335,9 +3353,39 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
if (out_attr != nullptr) {
*out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
}
+    // If the end address isn't aligned, we need a block.
    if (out_blocks_needed != nullptr) {
-        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+        const size_t blocks_for_end_align =
+            (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
+                ? 1
+                : 0;
+        *out_blocks_needed = blocks_for_end_align;
}
R_SUCCEED();
}
Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KProcessAddress addr, size_t size, KMemoryState state_mask,
KMemoryState state, KMemoryPermission perm_mask,
KMemoryPermission perm, KMemoryAttribute attr_mask,
KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Check memory state.
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
// If the start address isn't aligned, we need a block.
if (out_blocks_needed != nullptr &&
Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
++(*out_blocks_needed);
}
R_SUCCEED();
}
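The split matters for the blocks-needed accounting: the end-alignment check uses only last_addr and the final block, so it lives in the iterator-based overload, while the start-alignment check needs the first iterator and is added afterwards by the wrapping overload. A standalone model of the two checks (assuming 4 KiB pages; names are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr uint64_t PageSize = 0x1000;

    constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v & ~(a - 1); }

    // One extra block is needed at each range edge that does not coincide with
    // an existing block boundary.
    constexpr size_t BlocksNeeded(uint64_t addr, uint64_t size,
                                  uint64_t first_block_start, uint64_t last_block_end) {
        const uint64_t last_addr = addr + size - 1;
        const size_t start = (AlignDown(addr, PageSize) != first_block_start) ? 1 : 0;
        const size_t end =
            (AlignDown(last_addr, PageSize) + PageSize != last_block_end) ? 1 : 0;
        return start + end;
    }

    int main() {
        // Range exactly covers one block: no splits needed.
        assert(BlocksNeeded(0x10000, 0x4000, 0x10000, 0x14000) == 0);
        // Range starts and ends inside a larger block: split at both edges.
        assert(BlocksNeeded(0x11000, 0x1000, 0x10000, 0x14000) == 2);
        return 0;
    }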


@@ -63,7 +63,7 @@ public:
explicit KPageTable(Core::System& system_);
~KPageTable();
-    Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+    Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
KProcessAddress code_addr, size_t code_size,
KSystemResource* system_resource, KResourceLimit* resource_limit,
@@ -126,8 +126,6 @@ public:
return m_block_info_manager;
}
-    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
KPhysicalAddress phys_addr, KProcessAddress region_start,
size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@@ -162,6 +160,21 @@ public:
void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
const KPageGroup& pg);
KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
size_t GetRegionSize(Svc::MemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
KProcessAddress GetRegionAddress(KMemoryState state) const {
return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}
size_t GetRegionSize(KMemoryState state) const {
return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
return this->CanContain(addr, size,
static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
}
protected:
struct PageLinkedList {
private:
@@ -204,12 +217,13 @@ protected:
private:
enum class OperationType : u32 {
Map = 0,
-        MapFirst = 1,
-        MapGroup = 2,
+        MapGroup = 1,
+        MapFirstGroup = 2,
Unmap = 3,
ChangePermissions = 4,
ChangePermissionsAndRefresh = 5,
-        Separate = 6,
+        ChangePermissionsAndRefreshAndFlush = 6,
+        Separate = 7,
};
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -228,8 +242,6 @@ private:
Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
OperationType operation, KPhysicalAddress map_addr = 0);
void FinalizeUpdate(PageLinkedList* page_list);
-    KProcessAddress GetRegionAddress(KMemoryState state) const;
-    size_t GetRegionSize(KMemoryState state) const;
KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
size_t num_pages, size_t alignment, size_t offset,
@@ -250,6 +262,13 @@ private:
Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
KMemoryAttribute attr_mask, KMemoryAttribute attr,
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
KMemoryAttribute* out_attr, size_t* out_blocks_needed,
KProcessAddress addr, size_t size, KMemoryState state_mask,
@@ -381,7 +400,7 @@ public:
constexpr size_t GetAliasCodeRegionSize() const {
return m_alias_code_region_end - m_alias_code_region_start;
}
-    size_t GetNormalMemorySize() {
+    size_t GetNormalMemorySize() const {
KScopedLightLock lk(m_general_lock);
return GetHeapSize() + m_mapped_physical_memory_size;
}

File diff suppressed because it is too large


@@ -1,59 +1,23 @@
-// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <cstddef>
#include <list>
#include <map>
#include <string>
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_capabilities.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_page_table_manager.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
namespace Core {
namespace Memory {
class Memory;
};
class System;
} // namespace Core
namespace FileSys {
class ProgramMetadata;
}
namespace Kernel {
class KernelCore;
class KResourceLimit;
class KThread;
class KSharedMemoryInfo;
class TLSPage;
struct CodeSet;
enum class MemoryRegion : u16 {
APPLICATION = 1,
SYSTEM = 2,
BASE = 3,
};
enum class ProcessActivity : u32 {
Runnable,
Paused,
};
enum class DebugWatchpointType : u8 {
None = 0,
Read = 1 << 0,
@@ -72,9 +36,6 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
public:
explicit KProcess(KernelCore& kernel);
~KProcess() override;
enum class State {
Created = static_cast<u32>(Svc::ProcessState::Created),
CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
@@ -86,337 +47,83 @@ public:
DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
};
-    enum : u64 {
-        /// Lowest allowed process ID for a kernel initial process.
-        InitialKIPIDMin = 1,
-        /// Highest allowed process ID for a kernel initial process.
-        InitialKIPIDMax = 80,
-        /// Lowest allowed process ID for a userland process.
-        ProcessIDMin = 81,
-        /// Highest allowed process ID for a userland process.
-        ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
-    };
+    using ThreadList = Common::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;
static constexpr size_t AslrAlignment = 2_MiB;
-    // Used to determine how process IDs are assigned.
-    enum class ProcessType {
-        KernelInternal,
-        Userland,
-    };
-    static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
-    static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
-                             ProcessType type, KResourceLimit* res_limit);

public:
+    static constexpr u64 InitialProcessIdMin = 1;
+    static constexpr u64 InitialProcessIdMax = 0x50;
/// Gets a reference to the process' page table.
KPageTable& GetPageTable() {
return m_page_table;
}
/// Gets const a reference to the process' page table.
const KPageTable& GetPageTable() const {
return m_page_table;
}
/// Gets a reference to the process' handle table.
KHandleTable& GetHandleTable() {
return m_handle_table;
}
/// Gets a const reference to the process' handle table.
const KHandleTable& GetHandleTable() const {
return m_handle_table;
}
/// Gets a reference to process's memory.
Core::Memory::Memory& GetMemory() const;
Result SignalToAddress(KProcessAddress address) {
return m_condition_var.SignalToAddress(address);
}
Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) {
return m_condition_var.WaitForAddress(handle, address, tag);
}
void SignalConditionVariable(u64 cv_key, int32_t count) {
return m_condition_var.Signal(cv_key, count);
}
Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) {
R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns));
}
Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value,
s32 count) {
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
}
Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
KProcessAddress GetProcessLocalRegionAddress() const {
return m_plr_address;
}
/// Gets the current status of the process
State GetState() const {
return m_state;
}
/// Gets the unique ID that identifies this particular process.
u64 GetProcessId() const {
return m_process_id;
}
/// Gets the program ID corresponding to this process.
u64 GetProgramId() const {
return m_program_id;
}
KProcessAddress GetEntryPoint() const {
return m_code_address;
}
/// Gets the resource limit descriptor for this process
KResourceLimit* GetResourceLimit() const;
/// Gets the ideal CPU core ID for this process
u8 GetIdealCoreId() const {
return m_ideal_core;
}
/// Checks if the specified thread priority is valid.
bool CheckThreadPriority(s32 prio) const {
return ((1ULL << prio) & GetPriorityMask()) != 0;
}
/// Gets the bitmask of allowed cores that this process' threads can run on.
u64 GetCoreMask() const {
return m_capabilities.GetCoreMask();
}
/// Gets the bitmask of allowed thread priorities.
u64 GetPriorityMask() const {
return m_capabilities.GetPriorityMask();
}
/// Gets the amount of secure memory to allocate for memory management.
u32 GetSystemResourceSize() const {
return m_system_resource_size;
}
/// Gets the amount of secure memory currently in use for memory management.
u32 GetSystemResourceUsage() const {
// On hardware, this returns the amount of system resource memory that has
// been used by the kernel. This is problematic for Yuzu to emulate, because
// system resource memory is used for page tables -- and yuzu doesn't really
// have a way to calculate how much memory is required for page tables for
// the current process at any given time.
// TODO: Is this even worth implementing? Games may retrieve this value via
// an SDK function that gets used + available system resource size for debug
// or diagnostic purposes. However, it seems unlikely that a game would make
// decisions based on how much system memory is dedicated to its page tables.
// Is returning a value other than zero wise?
return 0;
}
/// Whether this process is an AArch64 or AArch32 process.
bool Is64BitProcess() const {
return m_is_64bit_process;
}
bool IsSuspended() const {
return m_is_suspended;
}
void SetSuspended(bool suspended) {
m_is_suspended = suspended;
}
/// Gets the total running time of the process instance in ticks.
u64 GetCPUTimeTicks() const {
return m_total_process_running_time_ticks;
}
/// Updates the total running time, adding the given ticks to it.
void UpdateCPUTimeTicks(u64 ticks) {
m_total_process_running_time_ticks += ticks;
}
/// Gets the process schedule count, used for thread yielding
s64 GetScheduledCount() const {
return m_schedule_count;
}
/// Increments the process schedule count, used for thread yielding.
void IncrementScheduledCount() {
++m_schedule_count;
}
void IncrementRunningThreadCount();
void DecrementRunningThreadCount();
void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
m_running_threads[core] = thread;
m_running_thread_idle_counts[core] = idle_count;
}
void ClearRunningThread(KThread* thread) {
for (size_t i = 0; i < m_running_threads.size(); ++i) {
if (m_running_threads[i] == thread) {
m_running_threads[i] = nullptr;
}
}
}
[[nodiscard]] KThread* GetRunningThread(s32 core) const {
return m_running_threads[core];
}
bool ReleaseUserException(KThread* thread);
[[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
return m_pinned_threads[core_id];
}
/// Gets 8 bytes of random data for svcGetInfo RandomEntropy
u64 GetRandomEntropy(std::size_t index) const {
return m_random_entropy.at(index);
}
/// Retrieves the total physical memory available to this process in bytes.
u64 GetTotalPhysicalMemoryAvailable();
/// Retrieves the total physical memory available to this process in bytes,
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
/// Retrieves the total physical memory used by this process in bytes.
u64 GetTotalPhysicalMemoryUsed();
/// Retrieves the total physical memory used by this process in bytes,
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
/// Gets the list of all threads created with this process as their owner.
std::list<KThread*>& GetThreadList() {
return m_thread_list;
}
/// Registers a thread as being created under this process,
/// adding it to this process' thread list.
void RegisterThread(KThread* thread);
/// Unregisters a thread from this process, removing it
/// from this process' thread list.
void UnregisterThread(KThread* thread);
/// Retrieves the number of available threads for this process.
u64 GetFreeThreadCount() const;
/// Clears the signaled state of the process if and only if it's signaled.
///
/// @pre The process must not already be terminated. If this is called on a
/// terminated process, then ResultInvalidState will be returned.
///
/// @pre The process must be in a signaled state. If this is called on a
/// process instance that is not signaled, ResultInvalidState will be
/// returned.
Result Reset();
/**
* Loads process-specific configuration info from metadata provided
* by an executable.
*
* @param metadata The provided metadata to load process-specific info from.
*
* @returns ResultSuccess if all relevant metadata could be loaded and
* parsed. Otherwise, an error code is returned.
*/
Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
bool is_hbl);
/**
* Starts the main application thread for this process.
*
* @param main_thread_priority The priority for the main thread.
* @param stack_size The stack size for the main thread in bytes.
*/
void Run(s32 main_thread_priority, u64 stack_size);
/**
* Prepares a process for termination by stopping all of its threads
* and clearing any other resources.
*/
void PrepareForTermination();
void LoadModule(CodeSet code_set, KProcessAddress base_addr);
bool IsInitialized() const override {
return m_is_initialized;
}
static void PostDestroy(uintptr_t arg) {}
void Finalize() override;
u64 GetId() const override {
return GetProcessId();
}
bool IsHbl() const {
return m_is_hbl;
}
bool IsSignaled() const override;
void DoWorkerTaskImpl();
Result SetActivity(ProcessActivity activity);
void PinCurrentThread(s32 core_id);
void UnpinCurrentThread(s32 core_id);
void UnpinThread(KThread* thread);
KLightLock& GetStateLock() {
return m_state_lock;
}
Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
[[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
// Frees a used TLS slot identified by the given address.
Result DeleteThreadLocalRegion(KProcessAddress addr);
///////////////////////////////////////////////////////////////////////////////////////////////
// Debug watchpoint management
// Attempts to insert a watchpoint into a free slot. Returns false if none are available.
bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
// Attempts to remove the watchpoint specified by the given parameters.
bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
return m_watchpoints;
}
const std::string& GetName() {
return name;
}
static constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
static constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
private:
using SharedMemoryInfoList = Common::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
using TLPTree =
Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
using TLPIterator = TLPTree::iterator;
private:
KPageTable m_page_table;
std::atomic<size_t> m_used_kernel_memory_size{};
TLPTree m_fully_used_tlp_tree{};
TLPTree m_partially_used_tlp_tree{};
s32 m_ideal_core_id{};
KResourceLimit* m_resource_limit{};
KSystemResource* m_system_resource{};
size_t m_memory_release_hint{};
State m_state{};
KLightLock m_state_lock;
KLightLock m_list_lock;
KConditionVariable m_cond_var;
KAddressArbiter m_address_arbiter;
std::array<u64, 4> m_entropy{};
bool m_is_signaled{};
bool m_is_initialized{};
bool m_is_application{};
bool m_is_default_application_system_resource{};
bool m_is_hbl{};
std::array<char, 13> m_name{};
std::atomic<u16> m_num_running_threads{};
Svc::CreateProcessFlag m_flags{};
KMemoryManager::Pool m_memory_pool{};
s64 m_schedule_count{};
KCapabilities m_capabilities{};
u64 m_program_id{};
u64 m_process_id{};
KProcessAddress m_code_address{};
size_t m_code_size{};
size_t m_main_thread_stack_size{};
size_t m_max_process_memory{};
u32 m_version{};
KHandleTable m_handle_table;
KProcessAddress m_plr_address{};
KThread* m_exception_thread{};
ThreadList m_thread_list{};
SharedMemoryInfoList m_shared_memory_list{};
bool m_is_suspended{};
bool m_is_immortal{};
bool m_is_handle_table_initialized{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_switch_counts{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
std::map<KProcessAddress, u64> m_debug_page_refcounts{};
std::atomic<s64> m_cpu_time{};
std::atomic<s64> m_num_process_switches{};
std::atomic<s64> m_num_thread_switches{};
std::atomic<s64> m_num_fpu_switches{};
std::atomic<s64> m_num_supervisor_calls{};
std::atomic<s64> m_num_ipc_messages{};
std::atomic<s64> m_num_ipc_replies{};
std::atomic<s64> m_num_ipc_receives{};
private:
Result StartTermination();
void FinishTermination();
void PinThread(s32 core_id, KThread* thread) {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
ASSERT(thread != nullptr);
@@ -431,6 +138,395 @@ private:
m_pinned_threads[core_id] = nullptr;
}
public:
explicit KProcess(KernelCore& kernel);
~KProcess() override;
Result Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
bool is_real);
Result Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
std::span<const u32> caps, KResourceLimit* res_limit,
KMemoryManager::Pool pool, bool immortal);
Result Initialize(const Svc::CreateProcessParameter& params, std::span<const u32> user_caps,
KResourceLimit* res_limit, KMemoryManager::Pool pool);
void Exit();
const char* GetName() const {
return m_name.data();
}
u64 GetProgramId() const {
return m_program_id;
}
u64 GetProcessId() const {
return m_process_id;
}
State GetState() const {
return m_state;
}
u64 GetCoreMask() const {
return m_capabilities.GetCoreMask();
}
u64 GetPhysicalCoreMask() const {
return m_capabilities.GetPhysicalCoreMask();
}
u64 GetPriorityMask() const {
return m_capabilities.GetPriorityMask();
}
s32 GetIdealCoreId() const {
return m_ideal_core_id;
}
void SetIdealCoreId(s32 core_id) {
m_ideal_core_id = core_id;
}
bool CheckThreadPriority(s32 prio) const {
return ((1ULL << prio) & this->GetPriorityMask()) != 0;
}
u32 GetCreateProcessFlags() const {
return static_cast<u32>(m_flags);
}
bool Is64Bit() const {
return True(m_flags & Svc::CreateProcessFlag::Is64Bit);
}
KProcessAddress GetEntryPoint() const {
return m_code_address;
}
size_t GetMainStackSize() const {
return m_main_thread_stack_size;
}
KMemoryManager::Pool GetMemoryPool() const {
return m_memory_pool;
}
u64 GetRandomEntropy(size_t i) const {
return m_entropy[i];
}
bool IsApplication() const {
return m_is_application;
}
bool IsDefaultApplicationSystemResource() const {
return m_is_default_application_system_resource;
}
bool IsSuspended() const {
return m_is_suspended;
}
void SetSuspended(bool suspended) {
m_is_suspended = suspended;
}
Result Terminate();
bool IsTerminated() const {
return m_state == State::Terminated;
}
bool IsPermittedSvc(u32 svc_id) const {
return m_capabilities.IsPermittedSvc(svc_id);
}
bool IsPermittedInterrupt(s32 interrupt_id) const {
return m_capabilities.IsPermittedInterrupt(interrupt_id);
}
bool IsPermittedDebug() const {
return m_capabilities.IsPermittedDebug();
}
bool CanForceDebug() const {
return m_capabilities.CanForceDebug();
}
bool IsHbl() const {
return m_is_hbl;
}
Kernel::KMemoryManager::Direction GetAllocateOption() const {
// TODO: property of the KPageTableBase
return KMemoryManager::Direction::FromFront;
}
ThreadList& GetThreadList() {
return m_thread_list;
}
const ThreadList& GetThreadList() const {
return m_thread_list;
}
bool EnterUserException();
bool LeaveUserException();
bool ReleaseUserException(KThread* thread);
KThread* GetPinnedThread(s32 core_id) const {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
return m_pinned_threads[core_id];
}
const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
return m_capabilities.GetSvcPermissions();
}
KResourceLimit* GetResourceLimit() const {
return m_resource_limit;
}
bool ReserveResource(Svc::LimitableResource which, s64 value);
bool ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout);
void ReleaseResource(Svc::LimitableResource which, s64 value);
void ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint);
KLightLock& GetStateLock() {
return m_state_lock;
}
KLightLock& GetListLock() {
return m_list_lock;
}
KPageTable& GetPageTable() {
return m_page_table;
}
const KPageTable& GetPageTable() const {
return m_page_table;
}
KHandleTable& GetHandleTable() {
return m_handle_table;
}
const KHandleTable& GetHandleTable() const {
return m_handle_table;
}
size_t GetUsedUserPhysicalMemorySize() const;
size_t GetTotalUserPhysicalMemorySize() const;
size_t GetUsedNonSystemUserPhysicalMemorySize() const;
size_t GetTotalNonSystemUserPhysicalMemorySize() const;
Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
Result CreateThreadLocalRegion(KProcessAddress* out);
Result DeleteThreadLocalRegion(KProcessAddress addr);
KProcessAddress GetProcessLocalRegionAddress() const {
return m_plr_address;
}
KThread* GetExceptionThread() const {
return m_exception_thread;
}
void AddCpuTime(s64 diff) {
m_cpu_time += diff;
}
s64 GetCpuTime() {
return m_cpu_time.load();
}
s64 GetScheduledCount() const {
return m_schedule_count;
}
void IncrementScheduledCount() {
++m_schedule_count;
}
void IncrementRunningThreadCount();
void DecrementRunningThreadCount();
size_t GetRequiredSecureMemorySizeNonDefault() const {
if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
return secure_system_resource->CalculateRequiredSecureMemorySize();
}
return 0;
}
size_t GetRequiredSecureMemorySize() const {
if (m_system_resource->IsSecureResource()) {
auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
return secure_system_resource->CalculateRequiredSecureMemorySize();
}
return 0;
}
size_t GetTotalSystemResourceSize() const {
if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
return secure_system_resource->GetSize();
}
return 0;
}
size_t GetUsedSystemResourceSize() const {
if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
return secure_system_resource->GetUsedSize();
}
return 0;
}
void SetRunningThread(s32 core, KThread* thread, u64 idle_count, u64 switch_count) {
m_running_threads[core] = thread;
m_running_thread_idle_counts[core] = idle_count;
m_running_thread_switch_counts[core] = switch_count;
}
void ClearRunningThread(KThread* thread) {
for (size_t i = 0; i < m_running_threads.size(); ++i) {
if (m_running_threads[i] == thread) {
m_running_threads[i] = nullptr;
}
}
}
const KSystemResource& GetSystemResource() const {
return *m_system_resource;
}
const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const {
return m_system_resource->GetMemoryBlockSlabManager();
}
const KBlockInfoManager& GetBlockInfoManager() const {
return m_system_resource->GetBlockInfoManager();
}
const KPageTableManager& GetPageTableManager() const {
return m_system_resource->GetPageTableManager();
}
KThread* GetRunningThread(s32 core) const {
return m_running_threads[core];
}
u64 GetRunningThreadIdleCount(s32 core) const {
return m_running_thread_idle_counts[core];
}
u64 GetRunningThreadSwitchCount(s32 core) const {
return m_running_thread_switch_counts[core];
}
void RegisterThread(KThread* thread);
void UnregisterThread(KThread* thread);
Result Run(s32 priority, size_t stack_size);
Result Reset();
void SetDebugBreak() {
if (m_state == State::RunningAttached) {
this->ChangeState(State::DebugBreak);
}
}
void SetAttached() {
if (m_state == State::DebugBreak) {
this->ChangeState(State::RunningAttached);
}
}
Result SetActivity(Svc::ProcessActivity activity);
void PinCurrentThread();
void UnpinCurrentThread();
void UnpinThread(KThread* thread);
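// Condition variable and address arbiter access. These forward to the
// process-local KConditionVariable and KAddressArbiter members; on hardware
// they would back the condition-variable and address-arbitration SVCs
// (likely svcWaitProcessWideKeyAtomic/svcSignalProcessWideKey and
// svcWaitForAddress/svcSignalToAddress).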
void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
return m_cond_var.Signal(cv_key, count);
}
Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
}
Result SignalAddressArbiter(uintptr_t address, Svc::SignalType signal_type, s32 value,
s32 count) {
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
}
Result WaitAddressArbiter(uintptr_t address, Svc::ArbitrationType arb_type, s32 value,
s64 timeout) {
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
Result GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids, s32 max_out_count);
static void Switch(KProcess* cur_process, KProcess* next_process);
public:
// Attempts to insert a watchpoint into a free slot. Returns false if none are available.
bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
// Attempts to remove the watchpoint specified by the given parameters.
bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
return m_watchpoints;
}
public:
Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
bool is_hbl);
void LoadModule(CodeSet code_set, KProcessAddress base_addr);
Core::Memory::Memory& GetMemory() const;
public:
// Overridden parent functions.
bool IsInitialized() const override {
return m_is_initialized;
}
static void PostDestroy(uintptr_t arg) {}
void Finalize() override;
u64 GetIdImpl() const {
return this->GetProcessId();
}
u64 GetId() const override {
return this->GetIdImpl();
}
virtual bool IsSignaled() const override {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
return m_is_signaled;
}
void DoWorkerTaskImpl();
private:
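// Note: any actual state transition below marks the process as signaled and
// notifies waiters, since KProcess is itself a waitable synchronization
// object.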
void ChangeState(State new_state) {
if (m_state != new_state) {
m_state = new_state;
m_is_signaled = true;
this->NotifyAvailable();
}
}
Result InitializeHandleTable(s32 size) {
// Try to initialize the handle table.
R_TRY(m_handle_table.Initialize(size));
// We succeeded, so note that we did.
m_is_handle_table_initialized = true;
R_SUCCEED();
}
void FinalizeHandleTable() {
// Finalize the table.
m_handle_table.Finalize();
@@ -438,118 +534,6 @@ private:
// Note that the table is finalized.
m_is_handle_table_initialized = false;
}
void ChangeState(State new_state);
/// Allocates the main thread stack for the process, given the stack size in bytes.
Result AllocateMainThreadStack(std::size_t stack_size);
/// Memory manager for this process
KPageTable m_page_table;
/// Current status of the process
State m_state{};
/// The ID of this process
u64 m_process_id = 0;
/// Title ID corresponding to the process
u64 m_program_id = 0;
/// Specifies additional memory to be reserved for the process's memory management by the
/// system. When this is non-zero, secure memory is allocated and used for page table allocation
/// instead of using the normal global page tables/memory block management.
u32 m_system_resource_size = 0;
/// Resource limit descriptor for this process
KResourceLimit* m_resource_limit{};
KVirtualAddress m_system_resource_address{};
/// The ideal CPU core for this process; threads are scheduled on this core by default.
u8 m_ideal_core = 0;
/// Contains the parsed process capability descriptors.
ProcessCapabilities m_capabilities;
/// Whether this process is AArch64 (true) or AArch32 (false).
/// By default we assume AArch64, unless metadata provided to the
/// process during loading specifies otherwise.
bool m_is_64bit_process = true;
/// Total running time for the process in ticks.
std::atomic<u64> m_total_process_running_time_ticks = 0;
/// Per-process handle table for storing created object handles in.
KHandleTable m_handle_table;
/// Per-process address arbiter.
KAddressArbiter m_address_arbiter;
/// The per-process condition variable instance used to implement
/// various synchronization services, such as lock arbitration and
/// condition-variable-related facilities.
KConditionVariable m_condition_var;
/// Address indicating the location of the process' dedicated TLS region.
KProcessAddress m_plr_address = 0;
/// Address indicating the location of the process's entry point.
KProcessAddress m_code_address = 0;
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{};
/// List of threads that are running with this process as their owner.
std::list<KThread*> m_thread_list;
/// List of shared memory regions owned by this process.
std::list<KSharedMemoryInfo*> m_shared_memory_list;
/// Address of the top of the main thread's stack
KProcessAddress m_main_thread_stack_top{};
/// Size of the main thread's stack
std::size_t m_main_thread_stack_size{};
/// Memory usage capacity for the process
std::size_t m_memory_usage_capacity{};
/// Process total image size
std::size_t m_image_size{};
/// Schedule count of this process
s64 m_schedule_count{};
size_t m_memory_release_hint{};
std::string name{};
bool m_is_signaled{};
bool m_is_suspended{};
bool m_is_immortal{};
bool m_is_handle_table_initialized{};
bool m_is_initialized{};
bool m_is_hbl{};
std::atomic<u16> m_num_running_threads{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
std::map<KProcessAddress, u64> m_debug_page_refcounts;
KThread* m_exception_thread{};
KLightLock m_state_lock;
KLightLock m_list_lock;
using TLPTree =
Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
using TLPIterator = TLPTree::iterator;
TLPTree m_fully_used_tlp_tree;
TLPTree m_partially_used_tlp_tree;
};
} // namespace Kernel

View File

@@ -190,7 +190,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
if (m_state.should_count_idle) {
if (highest_thread != nullptr) [[likely]] {
if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count, 0);
}
} else {
m_state.idle_count++;
@@ -356,7 +356,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
const s64 tick_diff = cur_tick - prev_tick;
cur_thread->AddCpuTime(m_core_id, tick_diff);
if (cur_process != nullptr) {
cur_process->UpdateCPUTimeTicks(tick_diff);
cur_process->AddCpuTime(tick_diff);
}
m_last_context_switch_time = cur_tick;

View File

@@ -1,25 +1,100 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_system_resource.h"
namespace Kernel {
Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit,
KMemoryManager::Pool pool) {
// Unimplemented
UNREACHABLE();
// Set members.
m_resource_limit = resource_limit;
m_resource_size = size;
m_resource_pool = pool;
// Determine required size for our secure resource.
const size_t secure_size = this->CalculateRequiredSecureMemorySize();
// Reserve memory for our secure resource.
KScopedResourceReservation memory_reservation(
m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, secure_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate secure memory.
R_TRY(KSystemControl::AllocateSecureMemory(m_kernel, std::addressof(m_resource_address),
m_resource_size, static_cast<u32>(m_resource_pool)));
ASSERT(m_resource_address != 0);
// Ensure we clean up the secure memory, if we fail past this point.
ON_RESULT_FAILURE {
KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
static_cast<u32>(m_resource_pool));
};
// Check that our allocation is bigger than the reference counts needed for it.
const size_t rc_size =
Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(m_resource_size), PageSize);
R_UNLESS(m_resource_size > rc_size, ResultOutOfMemory);
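// Layout note: the first rc_size bytes of the secure allocation hold the
// page table reference counts; the remainder backs the dynamic page manager
// that the slab heaps below allocate from.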
// Get resource pointer.
KPhysicalAddress resource_paddr =
KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address);
auto* resource =
m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);
// Initialize slab heaps.
m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size,
PageSize);
m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, resource);
m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
// Initialize managers.
m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager),
std::addressof(m_page_table_heap));
m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager),
std::addressof(m_memory_block_heap));
m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager),
std::addressof(m_block_info_heap));
// Set our managers.
this->SetManagers(m_memory_block_slab_manager, m_block_info_manager, m_page_table_manager);
// Commit the memory reservation.
memory_reservation.Commit();
// Open reference to our resource limit.
m_resource_limit->Open();
// Set ourselves as initialized.
m_is_initialized = true;
R_SUCCEED();
}
void KSecureSystemResource::Finalize() {
// Unimplemented
UNREACHABLE();
// Check that we have no outstanding allocations.
ASSERT(m_memory_block_slab_manager.GetUsed() == 0);
ASSERT(m_block_info_manager.GetUsed() == 0);
ASSERT(m_page_table_manager.GetUsed() == 0);
// Free our secure memory.
KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
static_cast<u32>(m_resource_pool));
// Release the memory reservation.
m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
this->CalculateRequiredSecureMemorySize());
// Close reference to our resource limit.
m_resource_limit->Close();
}
size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size,
KMemoryManager::Pool pool) {
// Unimplemented
UNREACHABLE();
return KSystemControl::CalculateRequiredSecureMemorySize(size, static_cast<u32>(pool));
}
} // namespace Kernel

View File

@@ -122,16 +122,15 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
case ThreadType::Main:
ASSERT(arg == 0);
[[fallthrough]];
case ThreadType::HighPriority:
[[fallthrough]];
case ThreadType::Dummy:
[[fallthrough]];
case ThreadType::User:
ASSERT(((owner == nullptr) ||
(owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
(owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
break;
case ThreadType::HighPriority:
case ThreadType::Dummy:
break;
case ThreadType::Kernel:
UNIMPLEMENTED();
break;
@@ -216,6 +215,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
// Setup the TLS, if needed.
if (type == ThreadType::User) {
R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
owner->GetMemory().ZeroBlock(m_tls_address, Svc::ThreadLocalRegionSize);
}
m_parent = owner;
@@ -403,7 +403,7 @@ void KThread::StartTermination() {
if (m_parent != nullptr) {
m_parent->ReleaseUserException(this);
if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
m_parent->UnpinCurrentThread(m_core_id);
m_parent->UnpinCurrentThread();
}
}
@@ -415,10 +415,6 @@ void KThread::StartTermination() {
m_parent->ClearRunningThread(this);
}
// Signal.
m_signaled = true;
KSynchronizationObject::NotifyAvailable();
// Clear previous thread in KScheduler.
KScheduler::ClearPreviousThread(m_kernel, this);
@@ -437,6 +433,13 @@ void KThread::FinishTermination() {
}
}
// Acquire the scheduler lock.
KScopedSchedulerLock sl{m_kernel};
// Signal.
m_signaled = true;
KSynchronizationObject::NotifyAvailable();
// Close the thread.
this->Close();
}
@@ -820,7 +823,7 @@ void KThread::CloneFpuStatus() {
ASSERT(this->GetOwnerProcess() != nullptr);
ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
if (this->GetOwnerProcess()->Is64BitProcess()) {
if (this->GetOwnerProcess()->Is64Bit()) {
// Clone FPSR and FPCR.
ThreadContext64 cur_ctx{};
m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
@@ -923,7 +926,7 @@ Result KThread::GetThreadContext3(Common::ScratchBuffer<u8>& out) {
// If we're not terminating, get the thread's user context.
if (!this->IsTerminationRequested()) {
if (m_parent->Is64BitProcess()) {
if (m_parent->Is64Bit()) {
// Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
auto context = GetContext64();
context.pstate &= 0xFF0FFE20;
@@ -1174,6 +1177,9 @@ Result KThread::Run() {
owner->IncrementRunningThreadCount();
}
// Open a reference, now that we're running.
this->Open();
// Set our state and finish.
this->SetState(ThreadState::Runnable);

View File

@@ -721,6 +721,7 @@ private:
// For core KThread implementation
ThreadContext32 m_thread_context_32{};
ThreadContext64 m_thread_context_64{};
Common::IntrusiveListNode m_process_list_node;
Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
s32 m_priority{};
using ConditionVariableThreadTreeTraits =

View File

@@ -101,35 +101,31 @@ struct KernelCore::Impl {
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
cores[core_id]->Initialize((*application_process).Is64BitProcess());
cores[core_id]->Initialize((*application_process).Is64Bit());
system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
}
}
void CloseApplicationProcess() {
KProcess* old_process = application_process.exchange(nullptr);
if (old_process == nullptr) {
return;
}
// old_process->Close();
// TODO: The process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
old_process->Finalize();
old_process->Destroy();
void TerminateApplicationProcess() {
application_process.load()->Terminate();
}
void Shutdown() {
is_shutting_down.store(true, std::memory_order_relaxed);
SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
process_list.clear();
CloseServices();
auto* old_process = application_process.exchange(nullptr);
if (old_process) {
old_process->Close();
}
process_list.clear();
next_object_id = 0;
next_kernel_process_id = KProcess::InitialKIPIDMin;
next_user_process_id = KProcess::ProcessIDMin;
next_kernel_process_id = KProcess::InitialProcessIdMin;
next_user_process_id = KProcess::ProcessIdMin;
next_thread_id = 1;
global_handle_table->Finalize();
@@ -176,8 +172,6 @@ struct KernelCore::Impl {
}
}
CloseApplicationProcess();
// Track kernel objects that were not freed on shutdown
{
std::scoped_lock lk{registered_objects_lock};
@@ -344,6 +338,8 @@ struct KernelCore::Impl {
// Create the system page table managers.
app_system_resource = std::make_unique<KSystemResource>(kernel);
sys_system_resource = std::make_unique<KSystemResource>(kernel);
KAutoObject::Create(std::addressof(*app_system_resource));
KAutoObject::Create(std::addressof(*sys_system_resource));
// Set the managers for the system resources.
app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
@@ -623,14 +619,33 @@ struct KernelCore::Impl {
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
// Insert a physical region for the secure applet memory.
const auto secure_applet_end_phys_addr =
slab_end_phys_addr + KSystemControl::SecureAppletMemorySize;
if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize,
KMemoryRegionType_DramKernelSecureAppletMemory));
}
// Insert a physical region for the unknown debug2 region.
constexpr size_t SecureUnknownRegionSize = 0;
const size_t secure_unknown_size = SecureUnknownRegionSize;
const auto secure_unknown_end_phys_addr = secure_applet_end_phys_addr + secure_unknown_size;
if constexpr (SecureUnknownRegionSize > 0) {
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
GetInteger(secure_applet_end_phys_addr), secure_unknown_size,
KMemoryRegionType_DramKernelSecureUnknown));
}
// Determine size available for kernel page table heaps, requiring > 8 MB.
const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
const size_t page_table_heap_size = resource_end_phys_addr - secure_unknown_end_phys_addr;
ASSERT(page_table_heap_size / 4_MiB > 2);
// Insert a physical region for the kernel page table heap region
ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
GetInteger(slab_end_phys_addr), page_table_heap_size,
GetInteger(secure_unknown_end_phys_addr), page_table_heap_size,
KMemoryRegionType_DramKernelPtHeap));
// All DRAM regions that we haven't tagged by this point will be mapped under the linear
@@ -773,8 +788,8 @@ struct KernelCore::Impl {
std::mutex registered_in_use_objects_lock;
std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
std::atomic<u64> next_kernel_process_id{KProcess::InitialProcessIdMin};
std::atomic<u64> next_user_process_id{KProcess::ProcessIdMin};
std::atomic<u64> next_thread_id{1};
// Lists all processes that exist in the current session.
@@ -905,10 +920,6 @@ const KProcess* KernelCore::ApplicationProcess() const {
return impl->application_process;
}
void KernelCore::CloseApplicationProcess() {
impl->CloseApplicationProcess();
}
const std::vector<KProcess*>& KernelCore::GetProcessList() const {
return impl->process_list;
}
@@ -1109,8 +1120,8 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
std::function<void()> func) {
// Make a new process.
KProcess* process = KProcess::Create(*this);
ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
GetSystemResourceLimit())));
ASSERT(R_SUCCEEDED(
process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
// Ensure that we don't hold onto any extra references.
SCOPE_EXIT({ process->Close(); });
@@ -1137,8 +1148,8 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
// Make a new process.
KProcess* process = KProcess::Create(*this);
ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
GetSystemResourceLimit())));
ASSERT(R_SUCCEEDED(
process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
// Ensure that we don't hold onto any extra references.
SCOPE_EXIT({ process->Close(); });
@@ -1247,7 +1258,8 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
void KernelCore::SuspendApplication(bool suspended) {
const bool should_suspend{exception_exited || suspended};
const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
const auto activity =
should_suspend ? Svc::ProcessActivity::Paused : Svc::ProcessActivity::Runnable;
// Get the application process.
KScopedAutoObject<KProcess> process = ApplicationProcess();
@@ -1281,6 +1293,8 @@ void KernelCore::SuspendApplication(bool suspended) {
}
void KernelCore::ShutdownCores() {
impl->TerminateApplicationProcess();
KScopedSchedulerLock lk{*this};
for (auto* thread : impl->shutdown_threads) {

View File

@@ -134,9 +134,6 @@ public:
/// Retrieves a const pointer to the application process.
const KProcess* ApplicationProcess() const;
/// Closes the application process.
void CloseApplicationProcess();
/// Retrieves the list of processes.
const std::vector<KProcess*>& GetProcessList() const;

View File

@@ -4426,7 +4426,7 @@ void Call(Core::System& system, u32 imm) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
Call64(system, imm);
} else {
Call32(system, imm);

View File

@@ -86,20 +86,19 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::TotalMemorySize:
*result = process->GetTotalPhysicalMemoryAvailable();
*result = process->GetTotalUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::UsedMemorySize:
*result = process->GetTotalPhysicalMemoryUsed();
*result = process->GetUsedUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::SystemResourceSizeTotal:
*result = process->GetSystemResourceSize();
*result = process->GetTotalSystemResourceSize();
R_SUCCEED();
case InfoType::SystemResourceSizeUsed:
LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
*result = process->GetSystemResourceUsage();
*result = process->GetUsedSystemResourceSize();
R_SUCCEED();
case InfoType::ProgramId:
@@ -111,20 +110,29 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::TotalNonSystemMemorySize:
*result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
*result = process->GetTotalNonSystemUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::UsedNonSystemMemorySize:
*result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
*result = process->GetUsedNonSystemUserPhysicalMemorySize();
R_SUCCEED();
case InfoType::IsApplication:
LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
*result = true;
*result = process->IsApplication();
R_SUCCEED();
case InfoType::FreeThreadCount:
*result = process->GetFreeThreadCount();
if (KResourceLimit* resource_limit = process->GetResourceLimit();
resource_limit != nullptr) {
const auto current_value =
resource_limit->GetCurrentValue(Svc::LimitableResource::ThreadCountMax);
const auto limit_value =
resource_limit->GetLimitValue(Svc::LimitableResource::ThreadCountMax);
*result = limit_value - current_value;
} else {
*result = 0;
}
R_SUCCEED();
default:
@@ -161,7 +169,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
case InfoType::RandomEntropy:
R_UNLESS(handle == 0, ResultInvalidHandle);
R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination);
R_UNLESS(info_sub_id < 4, ResultInvalidCombination);
*result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id);
R_SUCCEED();

View File

@@ -17,7 +17,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u3
R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag));
R_RETURN(KConditionVariable::WaitForAddress(system.Kernel(), thread_handle, address, tag));
}
/// Unlock a mutex
@@ -28,7 +28,7 @@ Result ArbitrateUnlock(Core::System& system, u64 address) {
R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address));
R_RETURN(KConditionVariable::SignalToAddress(system.Kernel(), address));
}
Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) {

View File

@@ -76,7 +76,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 s
} // namespace
Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size,
perm);
// Validate address / size.
@@ -108,10 +108,16 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
// Validate the attribute and mask.
constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
constexpr u32 SupportedMask =
static_cast<u32>(MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked);
R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
// Check that permission locked is either being set or not masked.
R_UNLESS((static_cast<Svc::MemoryAttribute>(mask) & Svc::MemoryAttribute::PermissionLocked) ==
(static_cast<Svc::MemoryAttribute>(attr) & Svc::MemoryAttribute::PermissionLocked),
ResultInvalidCombination);
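// In other words: attr must be a subset of mask, mask may only contain
// supported attributes, and PermissionLocked may only be set, never cleared
// (if it is present in mask, it must also be present in attr).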
// Validate that the region is in range for the current process.
auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()};
R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);

View File

@@ -46,7 +46,7 @@ Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
auto& page_table{current_process->GetPageTable()};
if (current_process->GetSystemResourceSize() == 0) {
if (current_process->GetTotalSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
R_THROW(ResultInvalidState);
}
@@ -95,7 +95,7 @@ Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
auto& page_table{current_process->GetPageTable()};
if (current_process->GetSystemResourceSize() == 0) {
if (current_process->GetTotalSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
R_THROW(ResultInvalidState);
}

View File

@@ -132,7 +132,7 @@ void SynchronizePreemptionState(Core::System& system) {
GetCurrentThread(kernel).ClearInterruptFlag();
// Unpin the current thread.
cur_process->UnpinCurrentThread(core_id);
cur_process->UnpinCurrentThread();
}
}

View File

@@ -85,10 +85,6 @@ Result StartThread(Core::System& system, Handle thread_handle) {
// Try to start the thread.
R_TRY(thread->Run());
// If we succeeded, persist a reference to the thread.
thread->Open();
system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
R_SUCCEED();
}
@@ -99,7 +95,6 @@ void ExitThread(Core::System& system) {
auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
system.GlobalSchedulerContext().RemoveThread(current_thread);
current_thread->Exit();
system.Kernel().UnregisterInUseObject(current_thread);
}
/// Sleep the current thread
@@ -260,7 +255,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_
auto list_iter = thread_list.cbegin();
for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
memory.Write64(out_thread_ids, (*list_iter)->GetThreadId());
memory.Write64(out_thread_ids, list_iter->GetThreadId());
out_thread_ids += sizeof(u64);
}

View File

@@ -592,7 +592,7 @@ void Call(Core::System& system, u32 imm) {
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) {
if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
Call64(system, imm);
} else {
Call32(system, imm);

View File

@@ -46,6 +46,7 @@ enum class MemoryAttribute : u32 {
IpcLocked = (1 << 1),
DeviceShared = (1 << 2),
Uncached = (1 << 3),
PermissionLocked = (1 << 4),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
@@ -603,13 +604,57 @@ enum class ProcessActivity : u32 {
Paused,
};
enum class CreateProcessFlag : u32 {
// Is 64 bit?
Is64Bit = (1 << 0),
// What kind of address space?
AddressSpaceShift = 1,
AddressSpaceMask = (7 << AddressSpaceShift),
AddressSpace32Bit = (0 << AddressSpaceShift),
AddressSpace64BitDeprecated = (1 << AddressSpaceShift),
AddressSpace32BitWithoutAlias = (2 << AddressSpaceShift),
AddressSpace64Bit = (3 << AddressSpaceShift),
// Should JIT debug be done on crash?
EnableDebug = (1 << 4),
// Should ASLR be enabled for the process?
EnableAslr = (1 << 5),
// Is the process an application?
IsApplication = (1 << 6),
// 4.x deprecated: Should use secure memory?
DeprecatedUseSecureMemory = (1 << 7),
// 5.x+ Pool partition type.
PoolPartitionShift = 7,
PoolPartitionMask = (0xF << PoolPartitionShift),
PoolPartitionApplication = (0 << PoolPartitionShift),
PoolPartitionApplet = (1 << PoolPartitionShift),
PoolPartitionSystem = (2 << PoolPartitionShift),
PoolPartitionSystemNonSecure = (3 << PoolPartitionShift),
// 7.x+ Should memory allocation be optimized? This requires IsApplication.
OptimizeMemoryAllocation = (1 << 11),
// 11.x+ DisableDeviceAddressSpaceMerge.
DisableDeviceAddressSpaceMerge = (1 << 12),
// Mask of all flags.
All = Is64Bit | AddressSpaceMask | EnableDebug | EnableAslr | IsApplication |
PoolPartitionMask | OptimizeMemoryAllocation | DisableDeviceAddressSpaceMerge,
};
DECLARE_ENUM_FLAG_OPERATORS(CreateProcessFlag);
struct CreateProcessParameter {
std::array<char, 12> name;
u32 version;
u64 program_id;
u64 code_address;
s32 code_num_pages;
u32 flags;
CreateProcessFlag flags;
Handle reslimit;
s32 system_resource_num_pages;
};
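// Illustrative sketch (not part of this commit): composing flags for a
// hypothetical 64-bit application process and validating them against the
// All mask. The exact combination a real loader passes is an assumption.
constexpr CreateProcessFlag ExampleApplicationFlags =
    CreateProcessFlag::Is64Bit | CreateProcessFlag::AddressSpace64Bit |
    CreateProcessFlag::EnableAslr | CreateProcessFlag::IsApplication |
    CreateProcessFlag::PoolPartitionApplication;
static_assert((ExampleApplicationFlags & CreateProcessFlag::All) ==
              ExampleApplicationFlags);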

View File

@@ -49,7 +49,7 @@ public:
: ServiceFramework{system_, "IManagerForSystemService"} {
// clang-format off
static const FunctionInfo functions[] = {
{0, nullptr, "CheckAvailability"},
{0, &IManagerForSystemService::CheckAvailability, "CheckAvailability"},
{1, nullptr, "GetAccountId"},
{2, nullptr, "EnsureIdTokenCacheAsync"},
{3, nullptr, "LoadIdTokenCache"},
@@ -78,6 +78,13 @@ public:
RegisterHandlers(functions);
}
private:
void CheckAvailability(HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
};
// 3.0.0+
@@ -400,13 +407,13 @@ protected:
IPC::RequestParser rp{ctx};
const auto base = rp.PopRaw<ProfileBase>();
const auto user_data = ctx.ReadBuffer();
const auto image_data = ctx.ReadBuffer(1);
const auto image_data = ctx.ReadBufferA(0);
const auto user_data = ctx.ReadBufferX(0);
LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
Common::StringFromFixedZeroTerminatedBuffer(
reinterpret_cast<const char*>(base.username.data()), base.username.size()),
base.timestamp, base.user_uuid.RawString());
LOG_INFO(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
Common::StringFromFixedZeroTerminatedBuffer(
reinterpret_cast<const char*>(base.username.data()), base.username.size()),
base.timestamp, base.user_uuid.RawString());
if (user_data.size() < sizeof(UserData)) {
LOG_ERROR(Service_ACC, "UserData buffer too small!");
@@ -837,6 +844,29 @@ void Module::Interface::InitializeApplicationInfoV2(HLERequestContext& ctx) {
rb.Push(ResultSuccess);
}
void Module::Interface::BeginUserRegistration(HLERequestContext& ctx) {
const auto user_id = Common::UUID::MakeRandom();
profile_manager->CreateNewUser(user_id, "yuzu");
LOG_INFO(Service_ACC, "called, uuid={}", user_id.FormattedString());
IPC::ResponseBuilder rb{ctx, 6};
rb.Push(ResultSuccess);
rb.PushRaw(user_id);
}
void Module::Interface::CompleteUserRegistration(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
Common::UUID user_id = rp.PopRaw<Common::UUID>();
LOG_INFO(Service_ACC, "called, uuid={}", user_id.FormattedString());
profile_manager->WriteUserSaveFile();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void Module::Interface::GetProfileEditor(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
Common::UUID user_id = rp.PopRaw<Common::UUID>();
@@ -880,6 +910,17 @@ void Module::Interface::StoreSaveDataThumbnailApplication(HLERequestContext& ctx
StoreSaveDataThumbnail(ctx, uuid, tid);
}
void Module::Interface::GetBaasAccountManagerForSystemService(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto uuid = rp.PopRaw<Common::UUID>();
LOG_INFO(Service_ACC, "called, uuid=0x{}", uuid.RawString());
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);
rb.PushIpcInterface<IManagerForSystemService>(system, uuid);
}
void Module::Interface::StoreSaveDataThumbnailSystem(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto uuid = rp.PopRaw<Common::UUID>();

View File

@@ -33,10 +33,13 @@ public:
void TrySelectUserWithoutInteraction(HLERequestContext& ctx);
void IsUserAccountSwitchLocked(HLERequestContext& ctx);
void InitializeApplicationInfoV2(HLERequestContext& ctx);
void BeginUserRegistration(HLERequestContext& ctx);
void CompleteUserRegistration(HLERequestContext& ctx);
void GetProfileEditor(HLERequestContext& ctx);
void ListQualifiedUsers(HLERequestContext& ctx);
void ListOpenContextStoredUsers(HLERequestContext& ctx);
void StoreSaveDataThumbnailApplication(HLERequestContext& ctx);
void GetBaasAccountManagerForSystemService(HLERequestContext& ctx);
void StoreSaveDataThumbnailSystem(HLERequestContext& ctx);
private:

View File

@@ -23,7 +23,7 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module_, std::shared_ptr<ProfileManager>
{99, nullptr, "DebugActivateOpenContextRetention"},
{100, nullptr, "GetUserRegistrationNotifier"},
{101, nullptr, "GetUserStateChangeNotifier"},
{102, nullptr, "GetBaasAccountManagerForSystemService"},
{102, &ACC_SU::GetBaasAccountManagerForSystemService, "GetBaasAccountManagerForSystemService"},
{103, nullptr, "GetBaasUserAvailabilityChangeNotifier"},
{104, nullptr, "GetProfileUpdateNotifier"},
{105, nullptr, "CheckNetworkServiceAvailabilityAsync"},
@@ -40,8 +40,8 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module_, std::shared_ptr<ProfileManager>
{152, nullptr, "LoadSignedDeviceIdentifierCacheForNintendoAccount"},
{190, nullptr, "GetUserLastOpenedApplication"},
{191, nullptr, "ActivateOpenContextHolder"},
{200, nullptr, "BeginUserRegistration"},
{201, nullptr, "CompleteUserRegistration"},
{200, &ACC_SU::BeginUserRegistration, "BeginUserRegistration"},
{201, &ACC_SU::CompleteUserRegistration, "CompleteUserRegistration"},
{202, nullptr, "CancelUserRegistration"},
{203, nullptr, "DeleteUser"},
{204, nullptr, "SetUserPosition"},

View File

@@ -96,9 +96,10 @@ public:
bool SetProfileBaseAndData(Common::UUID uuid, const ProfileBase& profile_new,
const UserData& data_new);
void WriteUserSaveFile();
private:
void ParseUserSaveFile();
void WriteUserSaveFile();
std::optional<std::size_t> AddToProfiles(const ProfileInfo& profile);
bool RemoveProfileAtIndex(std::size_t index);

View File

@@ -210,8 +210,8 @@ IDisplayController::IDisplayController(Core::System& system_)
{21, nullptr, "ClearAppletTransitionBuffer"},
{22, nullptr, "AcquireLastApplicationCaptureSharedBuffer"},
{23, nullptr, "ReleaseLastApplicationCaptureSharedBuffer"},
{24, nullptr, "AcquireLastForegroundCaptureSharedBuffer"},
{25, nullptr, "ReleaseLastForegroundCaptureSharedBuffer"},
{24, &IDisplayController::AcquireLastForegroundCaptureSharedBuffer, "AcquireLastForegroundCaptureSharedBuffer"},
{25, &IDisplayController::ReleaseLastForegroundCaptureSharedBuffer, "ReleaseLastForegroundCaptureSharedBuffer"},
{26, &IDisplayController::AcquireCallerAppletCaptureSharedBuffer, "AcquireCallerAppletCaptureSharedBuffer"},
{27, &IDisplayController::ReleaseCallerAppletCaptureSharedBuffer, "ReleaseCallerAppletCaptureSharedBuffer"},
{28, nullptr, "TakeScreenShotOfOwnLayerEx"},
@@ -239,6 +239,22 @@ void IDisplayController::TakeScreenShotOfOwnLayer(HLERequestContext& ctx) {
rb.Push(ResultSuccess);
}
void IDisplayController::AcquireLastForegroundCaptureSharedBuffer(HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(ResultSuccess);
rb.Push(1U);
rb.Push(0);
}
void IDisplayController::ReleaseLastForegroundCaptureSharedBuffer(HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IDisplayController::AcquireCallerAppletCaptureSharedBuffer(HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
@@ -780,7 +796,9 @@ ILockAccessor::ILockAccessor(Core::System& system_)
lock_event = service_context.CreateEvent("ILockAccessor::LockEvent");
}
ILockAccessor::~ILockAccessor() = default;
ILockAccessor::~ILockAccessor() {
service_context.CloseEvent(lock_event);
};
void ILockAccessor::TryLock(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
@@ -893,7 +911,9 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system_,
msg_queue->PushMessage(AppletMessageQueue::AppletMessage::ChangeIntoForeground);
}
ICommonStateGetter::~ICommonStateGetter() = default;
ICommonStateGetter::~ICommonStateGetter() {
service_context.CloseEvent(sleep_lock_event);
};
void ICommonStateGetter::GetBootMode(HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called");
@@ -1557,7 +1577,7 @@ ILibraryAppletSelfAccessor::ILibraryAppletSelfAccessor(Core::System& system_)
{100, nullptr, "CreateGameMovieTrimmer"},
{101, nullptr, "ReserveResourceForMovieOperation"},
{102, nullptr, "UnreserveResourceForMovieOperation"},
{110, nullptr, "GetMainAppletAvailableUsers"},
{110, &ILibraryAppletSelfAccessor::GetMainAppletAvailableUsers, "GetMainAppletAvailableUsers"},
{120, nullptr, "GetLaunchStorageInfoForDebug"},
{130, nullptr, "GetGpuErrorDetectedSystemEvent"},
{140, nullptr, "SetApplicationMemoryReservation"},
@@ -1652,6 +1672,25 @@ void ILibraryAppletSelfAccessor::GetCallerAppletIdentityInfo(HLERequestContext&
rb.PushRaw(applet_info);
}
void ILibraryAppletSelfAccessor::GetMainAppletAvailableUsers(HLERequestContext& ctx) {
const Service::Account::ProfileManager manager{};
bool is_empty{true};
s32 user_count{-1};
LOG_INFO(Service_AM, "called");
if (manager.GetUserCount() > 0) {
is_empty = false;
user_count = static_cast<s32>(manager.GetUserCount());
ctx.WriteBuffer(manager.GetAllUsers());
}
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(ResultSuccess);
rb.Push<u8>(is_empty);
rb.Push(user_count);
}
void ILibraryAppletSelfAccessor::PushInShowAlbum() {
const Applets::CommonArguments arguments{
.arguments_version = Applets::CommonArgumentVersion::Version3,

View File

@@ -124,6 +124,8 @@ public:
private:
void GetCallerAppletCaptureImageEx(HLERequestContext& ctx);
void TakeScreenShotOfOwnLayer(HLERequestContext& ctx);
void AcquireLastForegroundCaptureSharedBuffer(HLERequestContext& ctx);
void ReleaseLastForegroundCaptureSharedBuffer(HLERequestContext& ctx);
void AcquireCallerAppletCaptureSharedBuffer(HLERequestContext& ctx);
void ReleaseCallerAppletCaptureSharedBuffer(HLERequestContext& ctx);
};
@@ -345,6 +347,7 @@ private:
void GetLibraryAppletInfo(HLERequestContext& ctx);
void ExitProcessAndReturn(HLERequestContext& ctx);
void GetCallerAppletIdentityInfo(HLERequestContext& ctx);
void GetMainAppletAvailableUsers(HLERequestContext& ctx);
void PushInShowAlbum();
void PushInShowCabinetData();

View File

@@ -25,7 +25,9 @@ Cabinet::Cabinet(Core::System& system_, LibraryAppletMode applet_mode_,
service_context.CreateEvent("CabinetApplet:AvailabilityChangeEvent");
}
Cabinet::~Cabinet() = default;
Cabinet::~Cabinet() {
service_context.CloseEvent(availability_change_event);
};
void Cabinet::Initialize() {
Applet::Initialize();

View File

@@ -25,11 +25,12 @@ void LoopProcess(Core::System& system) {
server_manager->RegisterNamedService(
"caps:u", std::make_shared<IAlbumApplicationService>(system, album_manager));
server_manager->RegisterNamedService("caps:ss", std::make_shared<IScreenShotService>(system));
server_manager->RegisterNamedService(
"caps:ss", std::make_shared<IScreenShotService>(system, album_manager));
server_manager->RegisterNamedService("caps:sc",
std::make_shared<IScreenShotControlService>(system));
server_manager->RegisterNamedService("caps:su",
std::make_shared<IScreenShotApplicationService>(system));
server_manager->RegisterNamedService(
"caps:su", std::make_shared<IScreenShotApplicationService>(system, album_manager));
ServerManager::RunServer(std::move(server_manager));
}

View File

@@ -2,12 +2,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <sstream>
#include <stb_image.h>
#include <stb_image_resize.h>
#include "common/fs/file.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "common/stb.h"
#include "core/core.h"
#include "core/hle/service/caps/caps_manager.h"
#include "core/hle/service/caps/caps_result.h"
@@ -227,6 +226,49 @@ Result AlbumManager::LoadAlbumScreenShotThumbnail(
static_cast<int>(out_image_output.height), decoder_options.flags);
}
Result AlbumManager::SaveScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute,
std::span<const u8> image_data, u64 aruid) {
return SaveScreenShot(out_entry, attribute, {}, image_data, aruid);
}
Result AlbumManager::SaveScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute,
const ApplicationData& app_data, std::span<const u8> image_data,
u64 aruid) {
const u64 title_id = system.GetApplicationProcessProgramID();
const auto& user_clock = system.GetTimeManager().GetStandardUserSystemClockCore();
s64 posix_time{};
Result result = user_clock.GetCurrentTime(system, posix_time);
if (result.IsError()) {
return result;
}
const auto date = ConvertToAlbumDateTime(posix_time);
return SaveImage(out_entry, image_data, title_id, date);
}
Result AlbumManager::SaveEditedScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute,
const AlbumFileId& file_id,
std::span<const u8> image_data) {
const auto& user_clock = system.GetTimeManager().GetStandardUserSystemClockCore();
s64 posix_time{};
Result result = user_clock.GetCurrentTime(system, posix_time);
if (result.IsError()) {
return result;
}
const auto date = ConvertToAlbumDateTime(posix_time);
return SaveImage(out_entry, image_data, file_id.application_id, date);
}
Result AlbumManager::GetFile(std::filesystem::path& out_path, const AlbumFileId& file_id) const {
const auto file = album_files.find(file_id);
@@ -365,6 +407,47 @@ Result AlbumManager::LoadImage(std::span<u8> out_image, const std::filesystem::p
return ResultSuccess;
}
static void PNGToMemory(void* context, void* png, int len) {
std::vector<u8>* png_image = static_cast<std::vector<u8>*>(context);
const u8* data = static_cast<const u8*>(png);
// Append the encoded bytes. A reserve()+memcpy pattern would leave the
// vector's size at zero, so the subsequent file write would write nothing.
png_image->insert(png_image->end(), data, data + len);
}
Result AlbumManager::SaveImage(ApplicationAlbumEntry& out_entry, std::span<const u8> image,
u64 title_id, const AlbumFileDateTime& date) const {
const auto screenshot_path =
Common::FS::GetYuzuPathString(Common::FS::YuzuPath::ScreenshotsDir);
const std::string formatted_date =
fmt::format("{:04}-{:02}-{:02}_{:02}-{:02}-{:02}-{:03}", date.year, date.month, date.day,
date.hour, date.minute, date.second, 0);
const std::string file_path =
fmt::format("{}/{:016x}_{}.png", screenshot_path, title_id, formatted_date);
const Common::FS::IOFile db_file{file_path, Common::FS::FileAccessMode::Write,
Common::FS::FileType::BinaryFile};
std::vector<u8> png_image;
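// Assumption notes: stb_image_write treats a stride of 0 as tightly packed
// rows (1280 * 4 bytes for RGBA here), and the hardcoded 1280x720 matches
// the expected screenshot layer dimensions.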
if (!stbi_write_png_to_func(PNGToMemory, &png_image, 1280, 720, STBI_rgb_alpha, image.data(),
0)) {
return ResultFileCountLimit;
}
if (db_file.Write(png_image) != png_image.size()) {
return ResultFileCountLimit;
}
out_entry = {
.size = png_image.size(),
.hash = {},
.datetime = date,
.storage = AlbumStorage::Sd,
.content = ContentType::Screenshot,
.unknown = 1,
};
return ResultSuccess;
}
AlbumFileDateTime AlbumManager::ConvertToAlbumDateTime(u64 posix_time) const {
Time::TimeZone::CalendarInfo calendar_date{};
const auto& time_zone_manager =

View File

@@ -58,6 +58,15 @@ public:
std::vector<u8>& out_image, const AlbumFileId& file_id,
const ScreenShotDecodeOption& decoder_options) const;
Result SaveScreenShot(ApplicationAlbumEntry& out_entry, const ScreenShotAttribute& attribute,
std::span<const u8> image_data, u64 aruid);
Result SaveScreenShot(ApplicationAlbumEntry& out_entry, const ScreenShotAttribute& attribute,
const ApplicationData& app_data, std::span<const u8> image_data,
u64 aruid);
Result SaveEditedScreenShot(ApplicationAlbumEntry& out_entry,
const ScreenShotAttribute& attribute, const AlbumFileId& file_id,
std::span<const u8> image_data);
private:
static constexpr std::size_t NandAlbumFileLimit = 1000;
static constexpr std::size_t SdAlbumFileLimit = 10000;
@@ -67,6 +76,8 @@ private:
Result GetAlbumEntry(AlbumEntry& out_entry, const std::filesystem::path& path) const;
Result LoadImage(std::span<u8> out_image, const std::filesystem::path& path, int width,
int height, ScreenShotDecoderFlag flag) const;
Result SaveImage(ApplicationAlbumEntry& out_entry, std::span<const u8> image, u64 title_id,
const AlbumFileDateTime& date) const;
AlbumFileDateTime ConvertToAlbumDateTime(u64 posix_time) const;

View File

@@ -1,19 +1,25 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/logging/log.h"
#include "core/hle/service/caps/caps_manager.h"
#include "core/hle/service/caps/caps_types.h"
#include "core/hle/service/ipc_helpers.h"
#include "core/hle/service/caps/caps_ss.h"
namespace Service::Capture {
IScreenShotService::IScreenShotService(Core::System& system_)
: ServiceFramework{system_, "caps:ss"} {
IScreenShotService::IScreenShotService(Core::System& system_,
std::shared_ptr<AlbumManager> album_manager)
: ServiceFramework{system_, "caps:ss"}, manager{album_manager} {
// clang-format off
static const FunctionInfo functions[] = {
{201, nullptr, "SaveScreenShot"},
{202, nullptr, "SaveEditedScreenShot"},
{203, nullptr, "SaveScreenShotEx0"},
{203, &IScreenShotService::SaveScreenShotEx0, "SaveScreenShotEx0"},
{204, nullptr, "SaveEditedScreenShotEx0"},
{206, nullptr, "Unknown206"},
{206, &IScreenShotService::SaveEditedScreenShotEx1, "SaveEditedScreenShotEx1"},
{208, nullptr, "SaveScreenShotOfMovieEx1"},
{1000, nullptr, "Unknown1000"},
};
@@ -24,4 +30,65 @@ IScreenShotService::IScreenShotService(Core::System& system_)
IScreenShotService::~IScreenShotService() = default;
void IScreenShotService::SaveScreenShotEx0(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute{};
u32 report_option{};
INSERT_PADDING_BYTES(0x4);
u64 applet_resource_user_id{};
};
static_assert(sizeof(Parameters) == 0x50, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
const auto image_data_buffer = ctx.ReadBuffer();
LOG_INFO(Service_Capture,
"called, report_option={}, image_data_buffer_size={}, applet_resource_user_id={}",
parameters.report_option, image_data_buffer.size(),
parameters.applet_resource_user_id);
ApplicationAlbumEntry entry{};
const auto result = manager->SaveScreenShot(entry, parameters.attribute, image_data_buffer,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}
void IScreenShotService::SaveEditedScreenShotEx1(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute;
u64 width;
u64 height;
u64 thumbnail_width;
u64 thumbnail_height;
AlbumFileId file_id;
};
static_assert(sizeof(Parameters) == 0x78, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
const auto application_data_buffer = ctx.ReadBuffer(0);
const auto image_data_buffer = ctx.ReadBuffer(1);
const auto thumbnail_image_data_buffer = ctx.ReadBuffer(2);
LOG_INFO(Service_Capture,
"called, width={}, height={}, thumbnail_width={}, thumbnail_height={}, "
"application_id={:016x}, storage={}, type={}, app_data_buffer_size={}, "
"image_data_buffer_size={}, thumbnail_image_buffer_size={}",
parameters.width, parameters.height, parameters.thumbnail_width,
parameters.thumbnail_height, parameters.file_id.application_id,
parameters.file_id.storage, parameters.file_id.type, application_data_buffer.size(),
image_data_buffer.size(), thumbnail_image_data_buffer.size());
ApplicationAlbumEntry entry{};
const auto result = manager->SaveEditedScreenShot(entry, parameters.attribute,
parameters.file_id, image_data_buffer);
IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}
} // namespace Service::Capture
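The 0x50-byte static_assert above pins the raw IPC layout: it only holds if ScreenShotAttribute occupies 0x40 bytes and the explicit 4-byte padding keeps applet_resource_user_id 8-byte aligned. A layout sketch with assumed sizes (the real ScreenShotAttribute contents are not shown in this diff):
#include <cstdint>
struct ScreenShotAttributeSketch {
    std::uint8_t raw[0x40]; // assumed size, implied by the 0x50 total above
};
struct ParametersSketch {
    ScreenShotAttributeSketch attribute;   // offset 0x00
    std::uint32_t report_option;           // offset 0x40
    std::uint32_t padding;                 // offset 0x44, keeps the next field aligned
    std::uint64_t applet_resource_user_id; // offset 0x48
};
static_assert(sizeof(ParametersSketch) == 0x50, "layout sketch must match the service");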

View File

@@ -13,8 +13,14 @@ namespace Service::Capture {
class IScreenShotService final : public ServiceFramework<IScreenShotService> {
public:
explicit IScreenShotService(Core::System& system_);
explicit IScreenShotService(Core::System& system_, std::shared_ptr<AlbumManager> album_manager);
~IScreenShotService() override;
private:
void SaveScreenShotEx0(HLERequestContext& ctx);
void SaveEditedScreenShotEx1(HLERequestContext& ctx);
std::shared_ptr<AlbumManager> manager;
};
} // namespace Service::Capture

View File

@@ -2,19 +2,22 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/logging/log.h"
#include "core/hle/service/caps/caps_manager.h"
#include "core/hle/service/caps/caps_su.h"
#include "core/hle/service/caps/caps_types.h"
#include "core/hle/service/ipc_helpers.h"
namespace Service::Capture {
IScreenShotApplicationService::IScreenShotApplicationService(Core::System& system_)
: ServiceFramework{system_, "caps:su"} {
IScreenShotApplicationService::IScreenShotApplicationService(
Core::System& system_, std::shared_ptr<AlbumManager> album_manager)
: ServiceFramework{system_, "caps:su"}, manager{album_manager} {
// clang-format off
static const FunctionInfo functions[] = {
{32, &IScreenShotApplicationService::SetShimLibraryVersion, "SetShimLibraryVersion"},
{201, nullptr, "SaveScreenShot"},
{203, nullptr, "SaveScreenShotEx0"},
{205, nullptr, "SaveScreenShotEx1"},
{203, &IScreenShotApplicationService::SaveScreenShotEx0, "SaveScreenShotEx0"},
{205, &IScreenShotApplicationService::SaveScreenShotEx1, "SaveScreenShotEx1"},
{210, nullptr, "SaveScreenShotEx2"},
};
// clang-format on
@@ -36,4 +39,62 @@ void IScreenShotApplicationService::SetShimLibraryVersion(HLERequestContext& ctx
rb.Push(ResultSuccess);
}
void IScreenShotApplicationService::SaveScreenShotEx0(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute{};
AlbumReportOption report_option{};
INSERT_PADDING_BYTES(0x4);
u64 applet_resource_user_id{};
};
static_assert(sizeof(Parameters) == 0x50, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
const auto image_data_buffer = ctx.ReadBuffer();
LOG_INFO(Service_Capture,
"called, report_option={}, image_data_buffer_size={}, applet_resource_user_id={}",
parameters.report_option, image_data_buffer.size(),
parameters.applet_resource_user_id);
ApplicationAlbumEntry entry{};
const auto result = manager->SaveScreenShot(entry, parameters.attribute, image_data_buffer,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}
void IScreenShotApplicationService::SaveScreenShotEx1(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
struct Parameters {
ScreenShotAttribute attribute{};
AlbumReportOption report_option{};
INSERT_PADDING_BYTES(0x4);
u64 applet_resource_user_id{};
};
static_assert(sizeof(Parameters) == 0x50, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
const auto app_data_buffer = ctx.ReadBuffer(0);
const auto image_data_buffer = ctx.ReadBuffer(1);
LOG_INFO(Service_Capture,
"called, report_option={}, image_data_buffer_size={}, applet_resource_user_id={}",
parameters.report_option, image_data_buffer.size(),
parameters.applet_resource_user_id);
ApplicationAlbumEntry entry{};
ApplicationData app_data{};
std::memcpy(&app_data, app_data_buffer.data(), sizeof(ApplicationData));
const auto result =
manager->SaveScreenShot(entry, parameters.attribute, app_data, image_data_buffer,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 10};
rb.Push(result);
rb.PushRaw(entry);
}
} // namespace Service::Capture
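One caveat in SaveScreenShotEx1 above: the memcpy assumes the client's buffer holds at least sizeof(ApplicationData) bytes. A defensive sketch of the same copy, hypothetical rather than what the commit does, that clamps to the bytes actually received:
#include <algorithm>
#include <cstring>
#include <span>
#include <type_traits>
// Copy a trivially-copyable value out of an IPC buffer without reading
// past the end when the client sent fewer bytes than expected.
template <typename T>
static T ReadPodClamped(std::span<const unsigned char> buffer) {
    static_assert(std::is_trivially_copyable_v<T>);
    T value{};
    std::memcpy(&value, buffer.data(), std::min(buffer.size(), sizeof(T)));
    return value;
}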

View File

@@ -10,14 +10,20 @@ class System;
}
namespace Service::Capture {
class AlbumManager;
class IScreenShotApplicationService final : public ServiceFramework<IScreenShotApplicationService> {
public:
explicit IScreenShotApplicationService(Core::System& system_);
explicit IScreenShotApplicationService(Core::System& system_,
std::shared_ptr<AlbumManager> album_manager);
~IScreenShotApplicationService() override;
private:
void SetShimLibraryVersion(HLERequestContext& ctx);
void SaveScreenShotEx0(HLERequestContext& ctx);
void SaveScreenShotEx1(HLERequestContext& ctx);
std::shared_ptr<AlbumManager> manager;
};
} // namespace Service::Capture

View File

@@ -20,6 +20,8 @@ enum class AlbumImageOrientation {
enum class AlbumReportOption : s32 {
Disable,
Enable,
Unknown2,
Unknown3,
};
enum class ContentType : u8 {

View File

@@ -19,7 +19,9 @@ Controller_Palma::Controller_Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared
operation_complete_event = service_context.CreateEvent("hid:PalmaOperationCompleteEvent");
}
Controller_Palma::~Controller_Palma() = default;
Controller_Palma::~Controller_Palma() {
service_context.CloseEvent(operation_complete_event);
};
void Controller_Palma::OnInit() {}
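The pattern in this commit (PR 11911) repeats across services: every service_context.CreateEvent gains a matching CloseEvent in the destructor. A hypothetical RAII wrapper, not something yuzu uses, that would make the pairing automatic, assuming ServiceContext's CreateEvent/CloseEvent interface:
// Hypothetical helper: closing is tied to scope, so a service cannot leak
// a KEvent by forgetting the CloseEvent call in its destructor.
class ScopedEvent {
public:
    ScopedEvent(KernelHelpers::ServiceContext& ctx, std::string name)
        : service_context{ctx}, event{service_context.CreateEvent(std::move(name))} {}
    ~ScopedEvent() {
        service_context.CloseEvent(event);
    }
    ScopedEvent(const ScopedEvent&) = delete;
    ScopedEvent& operator=(const ScopedEvent&) = delete;
    Kernel::KEvent* Get() const {
        return event;
    }
private:
    KernelHelpers::ServiceContext& service_context;
    Kernel::KEvent* event{};
};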

View File

@@ -2757,6 +2757,10 @@ public:
joy_detach_event = service_context.CreateEvent("HidSys::JoyDetachEvent");
}
~HidSys() {
service_context.CloseEvent(joy_detach_event);
};
private:
void ApplyNpadSystemCommonPolicy(HLERequestContext& ctx) {
LOG_WARNING(Service_HID, "called");

View File

@@ -13,7 +13,10 @@ HidbusBase::HidbusBase(Core::System& system_, KernelHelpers::ServiceContext& ser
: system(system_), service_context(service_context_) {
send_command_async_event = service_context.CreateEvent("hidbus:SendCommandAsyncEvent");
}
HidbusBase::~HidbusBase() = default;
HidbusBase::~HidbusBase() {
service_context.CloseEvent(send_command_async_event);
};
void HidbusBase::ActivateDevice() {
if (is_activated) {

View File

@@ -23,6 +23,19 @@
#include "core/hle/service/ipc_helpers.h"
#include "core/memory.h"
namespace {
static thread_local std::array read_buffer_data_a{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
};
static thread_local std::array read_buffer_data_x{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
};
} // Anonymous namespace
namespace Service {
SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_)
@@ -328,26 +341,61 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
}
}
std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
static thread_local std::array read_buffer_a{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorA().size() > buffer_index, { return {}; },
"BufferDescriptorA invalid buffer_index {}", buffer_index);
auto& read_buffer = read_buffer_a[buffer_index];
return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
BufferDescriptorA()[buffer_index].Size(),
&read_buffer_data_a[buffer_index]);
}
std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
static thread_local std::array read_buffer_x{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorX().size() > buffer_index, { return {}; },
"BufferDescriptorX invalid buffer_index {}", buffer_index);
auto& read_buffer = read_buffer_x[buffer_index];
return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
BufferDescriptorX()[buffer_index].Size(),
&read_buffer_data_x[buffer_index]);
}
std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
static thread_local std::array read_buffer_a{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
static thread_local std::array read_buffer_data_a{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
static thread_local std::array read_buffer_x{
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
static thread_local std::array read_buffer_data_x{
Common::ScratchBuffer<u8>(),
Common::ScratchBuffer<u8>(),
Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
};
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
const bool is_buffer_x{BufferDescriptorX().size() > buffer_index &&
BufferDescriptorX()[buffer_index].Size()};
if (is_buffer_a && is_buffer_x) {
LOG_WARNING(Input, "Both buffer descriptors are available a.size={}, x.size={}",
BufferDescriptorA()[buffer_index].Size(),
BufferDescriptorX()[buffer_index].Size());
}
if (is_buffer_a) {
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorA().size() > buffer_index, { return {}; },

View File

@@ -253,6 +253,12 @@ public:
return domain_message_header.has_value();
}
/// Helper function to get a span of a buffer using the buffer descriptor A
[[nodiscard]] std::span<const u8> ReadBufferA(std::size_t buffer_index = 0) const;
/// Helper function to get a span of a buffer using the buffer descriptor X
[[nodiscard]] std::span<const u8> ReadBufferX(std::size_t buffer_index = 0) const;
/// Helper function to get a span of a buffer using the appropriate buffer descriptor
[[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const;
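With the third index wired up, a handler can address up to three input buffers, as SaveEditedScreenShotEx1 does earlier in this diff. A hypothetical handler sketch; ReadBuffer(i) resolves whichever of the A or X descriptors is populated at index i, while ReadBufferA/ReadBufferX pin the descriptor type explicitly:
// Hypothetical service handler (names are illustrative).
void ReceiveThreeBuffers(HLERequestContext& ctx) {
    const auto first = ctx.ReadBuffer(0);  // A[0] if mapped, otherwise X[0]
    const auto second = ctx.ReadBuffer(1);
    const auto third = ctx.ReadBuffer(2);  // valid now that the arrays hold three entries
    LOG_DEBUG(Service, "sizes: {} {} {}", first.size(), second.size(), third.size());
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultSuccess);
}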

View File

@@ -21,10 +21,8 @@ ServiceContext::ServiceContext(Core::System& system_, std::string name_)
// Create the process.
process = Kernel::KProcess::Create(kernel);
ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
Kernel::KProcess::ProcessType::KernelInternal,
kernel.GetSystemResourceLimit())
.IsSuccess());
ASSERT(R_SUCCEEDED(process->Initialize(Kernel::Svc::CreateProcessParameter{},
kernel.GetSystemResourceLimit(), false)));
// Register the process.
Kernel::KProcess::Register(kernel, process);

View File

@@ -41,6 +41,7 @@ void CoreData::BuildRandom(Age age, Gender gender, Race race) {
}
}
SetDefault();
SetGender(gender);
SetFavoriteColor(MiiUtil::GetRandomValue(FavoriteColor::Max));
SetRegionMove(0);

View File

@@ -41,7 +41,7 @@ bool BufferQueueCore::WaitForDequeueCondition(std::unique_lock<std::mutex>& lk)
s32 BufferQueueCore::GetMinUndequeuedBufferCountLocked(bool async) const {
// If DequeueBuffer is allowed to error out, we don't have to add an extra buffer.
if (!use_async_buffer) {
return max_acquired_buffer_count;
return 0;
}
if (dequeue_buffer_cannot_block || async) {
@@ -52,7 +52,7 @@ s32 BufferQueueCore::GetMinUndequeuedBufferCountLocked(bool async) const {
}
s32 BufferQueueCore::GetMinMaxBufferCountLocked(bool async) const {
return GetMinUndequeuedBufferCountLocked(async) + 1;
return GetMinUndequeuedBufferCountLocked(async);
}
s32 BufferQueueCore::GetMaxBufferCountLocked(bool async) const {
@@ -61,7 +61,7 @@ s32 BufferQueueCore::GetMaxBufferCountLocked(bool async) const {
if (override_max_buffer_count != 0) {
ASSERT(override_max_buffer_count >= min_buffer_count);
max_buffer_count = override_max_buffer_count;
return override_max_buffer_count;
}
// Any buffers that are dequeued by the producer or sitting in the queue waiting to be consumed

View File

@@ -134,7 +134,7 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
const s32 max_buffer_count = core->GetMaxBufferCountLocked(async);
if (async && core->override_max_buffer_count) {
if (core->override_max_buffer_count < max_buffer_count) {
LOG_ERROR(Service_Nvnflinger, "async mode is invalid with buffer count override");
*found = BufferQueueCore::INVALID_BUFFER_SLOT;
return Status::BadValue;
}
}
@@ -142,7 +142,8 @@ Status BufferQueueProducer::WaitForFreeSlotThenRelock(bool async, s32* found, St
// Free up any buffers that are in slots beyond the max buffer count
for (s32 s = max_buffer_count; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
ASSERT(slots[s].buffer_state == BufferState::Free);
if (slots[s].graphic_buffer != nullptr) {
if (slots[s].graphic_buffer != nullptr && slots[s].buffer_state == BufferState::Free &&
!slots[s].is_preallocated) {
core->FreeBufferLocked(s);
*return_flags |= Status::ReleaseAllBuffers;
}

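The tightened condition above only reclaims a slot beyond the max buffer count when it actually holds a buffer, that buffer is free, and it was not preallocated by the producer's client. Restated as a standalone predicate, with field names taken from the diff and the surrounding types assumed:
// Mirrors the new free condition in WaitForFreeSlotThenRelock.
static bool ShouldFreeExcessSlot(const BufferSlot& slot) {
    return slot.graphic_buffer != nullptr &&
           slot.buffer_state == BufferState::Free &&
           !slot.is_preallocated;
}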
View File

@@ -46,7 +46,7 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
// Get bounds of where mapping is possible.
const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
const auto state = Kernel::KMemoryState::Io;
const auto state = Kernel::KMemoryState::IoMemory;
const auto perm = Kernel::KMemoryPermission::UserReadWrite;
std::mt19937_64 rng{process->GetRandomEntropy(0)};

View File

@@ -66,7 +66,6 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
"ScreenComposition",
[this](std::uintptr_t, s64 time,
std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
{ const auto lock_guard = Lock(); }
vsync_signal.Set();
return std::chrono::nanoseconds(GetNextTicks());
});
@@ -99,6 +98,7 @@ Nvnflinger::~Nvnflinger() {
}
ShutdownLayers();
vsync_thread = {};
if (nvdrv) {
nvdrv->Close(disp_fd);
@@ -106,6 +106,7 @@ Nvnflinger::~Nvnflinger() {
}
void Nvnflinger::ShutdownLayers() {
const auto lock_guard = Lock();
for (auto& display : displays) {
for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) {
display.GetLayer(layer).Core().NotifyShutdown();
@@ -229,16 +230,6 @@ VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) {
return display->FindLayer(layer_id);
}
const VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) const {
const auto* const display = FindDisplay(display_id);
if (display == nullptr) {
return nullptr;
}
return display->FindLayer(layer_id);
}
VI::Layer* Nvnflinger::FindOrCreateLayer(u64 display_id, u64 layer_id) {
auto* const display = FindDisplay(display_id);
@@ -288,7 +279,6 @@ void Nvnflinger::Compose() {
auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
ASSERT(nvdisp);
guard->unlock();
Common::Rectangle<int> crop_rect{
static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};
@@ -299,7 +289,6 @@ void Nvnflinger::Compose() {
buffer.fence.fences, buffer.fence.num_fences);
MicroProfileFlip();
guard->lock();
swap_interval = buffer.swap_interval;

View File

@@ -117,9 +117,6 @@ private:
/// Finds the layer identified by the specified ID in the desired display.
[[nodiscard]] VI::Layer* FindLayer(u64 display_id, u64 layer_id);
/// Finds the layer identified by the specified ID in the desired display.
[[nodiscard]] const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;
/// Finds the layer identified by the specified ID in the desired display,
/// or creates the layer if it is not found.
/// To be used when the system expects the specified ID to already exist.

View File

@@ -141,6 +141,12 @@ public:
service_context.CreateEvent("IParentalControlService::RequestSuspensionEvent");
}
~IParentalControlService() {
service_context.CloseEvent(synchronization_event);
service_context.CloseEvent(unlinked_event);
service_context.CloseEvent(request_suspension_event);
};
private:
bool CheckFreeCommunicationPermissionImpl() const {
if (states.temporary_unlocked) {

View File

@@ -37,7 +37,7 @@ std::optional<Kernel::KProcess*> SearchProcessList(
void GetApplicationPidGeneric(HLERequestContext& ctx,
const std::vector<Kernel::KProcess*>& process_list) {
const auto process = SearchProcessList(process_list, [](const auto& proc) {
return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin;
return proc->GetProcessId() == Kernel::KProcess::ProcessIdMin;
});
IPC::ResponseBuilder rb{ctx, 4};

View File

@@ -58,14 +58,8 @@ private:
IPC::RequestParser rp{ctx};
const auto process_id = rp.PopRaw<u64>();
const auto data1 = ctx.ReadBuffer(0);
const auto data2 = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}
return std::span<const u8>{};
}();
const auto data1 = ctx.ReadBufferA(0);
const auto data2 = ctx.ReadBufferX(0);
LOG_DEBUG(Service_PREPO,
"called, type={:02X}, process_id={:016X}, data1_size={:016X}, data2_size={:016X}",
@@ -85,14 +79,8 @@ private:
const auto user_id = rp.PopRaw<u128>();
const auto process_id = rp.PopRaw<u64>();
const auto data1 = ctx.ReadBuffer(0);
const auto data2 = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}
return std::span<const u8>{};
}();
const auto data1 = ctx.ReadBufferA(0);
const auto data2 = ctx.ReadBufferX(0);
LOG_DEBUG(Service_PREPO,
"called, type={:02X}, user_id={:016X}{:016X}, process_id={:016X}, "
@@ -137,14 +125,8 @@ private:
IPC::RequestParser rp{ctx};
const auto title_id = rp.PopRaw<u64>();
const auto data1 = ctx.ReadBuffer(0);
const auto data2 = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}
return std::span<const u8>{};
}();
const auto data1 = ctx.ReadBufferA(0);
const auto data2 = ctx.ReadBufferX(0);
LOG_DEBUG(Service_PREPO, "called, title_id={:016X}, data1_size={:016X}, data2_size={:016X}",
title_id, data1.size(), data2.size());
@@ -161,14 +143,8 @@ private:
const auto user_id = rp.PopRaw<u128>();
const auto title_id = rp.PopRaw<u64>();
const auto data1 = ctx.ReadBuffer(0);
const auto data2 = [&ctx] {
if (ctx.CanReadBuffer(1)) {
return ctx.ReadBuffer(1);
}
return std::span<const u8>{};
}();
const auto data1 = ctx.ReadBufferA(0);
const auto data2 = ctx.ReadBufferX(0);
LOG_DEBUG(Service_PREPO,
"called, user_id={:016X}{:016X}, title_id={:016X}, data1_size={:016X}, "

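The prepo rewrite above swaps the probe-based lambda for direct descriptor addressing, presumably because data1 arrives via a map-alias (A) descriptor and data2 via a pointer (X) descriptor at the same index, which a combined ReadBuffer(1) lookup could miss. A hypothetical fragment showing the new shape:
// Hypothetical fragment; mirrors the replacement pattern above.
void SaveReportSketch(HLERequestContext& ctx) {
    const auto data1 = ctx.ReadBufferA(0); // map-alias (A) descriptor payload
    const auto data2 = ctx.ReadBufferX(0); // pointer (X) descriptor payload;
                                           // empty span if the descriptor is absent
    LOG_DEBUG(Service_PREPO, "data1_size={:016X}, data2_size={:016X}", data1.size(),
              data2.size());
}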
Some files were not shown because too many files have changed in this diff.