Compare commits

..

149 Commits

Author SHA1 Message Date
jacky400
87435e1088 Revert "buffer_cache: reset cached write bits after flushing invalidations" 2022-06-29 00:33:32 +08:00
bunnei
c78f6d4f20 Merge pull request #8504 from comex/mesosphere-current-process
Support `InfoType_MesosphereCurrentProcess`
2022-06-27 13:05:07 -07:00
bunnei
abfd690601 Merge pull request #8475 from liamwhite/x18
kernel: make current thread pointer thread local
2022-06-26 11:38:48 -07:00
comex
bf7e78795f Re-add missing case and braces, and trim whitespace 2022-06-25 18:01:56 -07:00
comex
a14438d013 Update src/core/hle/kernel/svc.cpp
Co-authored-by: liamwhite <liamwhite@users.noreply.github.com>
2022-06-25 18:00:29 -07:00
comex
48737a4bb2 Support InfoType_MesosphereCurrentProcess 2022-06-25 16:23:23 -07:00
bunnei
b321c39371 Merge pull request #8500 from liamwhite/poke
gdbstub: fix register pokes
2022-06-25 12:31:20 -07:00
Liam
19f475fd70 gdbstub: fix register pokes 2022-06-25 12:07:20 -04:00
Liam
2c56e94702 kernel: make current thread pointer thread local 2022-06-23 00:28:00 -04:00
bunnei
95b844dbae Merge pull request #8491 from Morph1984/extra-assert
KPageTable: Remove extraneous assert
2022-06-22 14:47:07 -07:00
bunnei
9da4e62573 Merge pull request #8483 from liamwhite/fire-emblem-three-semaphores
kernel: wait for threads to stop on pause
2022-06-22 14:46:33 -07:00
Morph
1c8f6ba18f KPageTable: Remove extraneous assert
Since start is always 0 and VAddr is unsigned, we can safely remove this assert.
2022-06-21 21:28:54 -04:00
Morph
ab0e71d7cb Merge pull request #8455 from lat9nq/mingw-clang
ci/windows: Use Clang for MinGW builds
2022-06-21 20:21:13 -04:00
bunnei
737c446fc1 Merge pull request #8432 from liamwhite/watchpoint
core/debugger: memory breakpoint support
2022-06-21 16:04:57 -07:00
bunnei
73e13aa090 Merge pull request #8468 from liamwhite/dispatch-tracking
kernel: fix some uses of disable_count
2022-06-21 15:30:27 -07:00
liamwhite
0d5792cc57 Merge pull request #8487 from german77/system-button
service: am: Stub PerformSystemButtonPressingIfInFocus
2022-06-20 16:59:26 -04:00
Narr the Reg
f37b2e6f10 service: am: Stub PerformSystemButtonPressingIfInFocus
Used by Ring Fit Adventure
2022-06-20 12:35:58 -05:00
Liam
24d7aaf43c kernel: wait for threads to stop on pause 2022-06-18 16:54:33 -04:00
Morph
5b2b15091f Merge pull request #8476 from liamwhite/gpu-wasnt-ready
core: fix initialization in single core, sync GPU mode
2022-06-17 03:08:15 -04:00
lat9nq
c42fde2a37 ci/windows: Build using Clang
Uses the MinGWClangCross toolchain script to build yuzu. Disables our
bundled SDL2 in favor of the system packages, which have been modified to
not use `-mwindows`. Also sets `-e` to stop the script on an error (as
opposed to packaging nothing).

Uses LLVM's linker for linking yuzu. Adds -femulated-tls due to a
libstdc++ incompatibility between GCC and Clang in vulkan_common.
2022-06-16 23:57:39 -04:00
lat9nq
fef3d8acb5 CMakeModules: Add MinGWClangCross
Specifies the programs we need for cross-compiling to Windows from
Linux using LLVM's compilers. Based on MinGWCross
2022-06-16 23:57:39 -04:00
lat9nq
e56410b404 ci/windows: Split up cmake command
Improves readability.
2022-06-16 23:57:39 -04:00
Liam
a6371fb69d core: fix initialization in single core, sync GPU mode 2022-06-16 23:43:35 -04:00
Morph
a33e7c13fa Merge pull request #8472 from german77/tace
common: param_package: Demote DEBUG to TRACE for getters
2022-06-16 16:43:32 -04:00
Morph
945f3222ae Merge pull request #8474 from DCNick3/yuzu-cmd-respect-log-filter
Make yuzu-cmd respect log_filter setting
2022-06-16 16:43:18 -04:00
Nikita Strygin
9e384ed54b Make yuzu-cmd respect log_filter setting
Because the logging infrastructure initializes before the config is loaded,
it reads the default setting for log_filter and ignores the one set in the
config. Changing log_filter after logging initialization requires some
additional calls.
2022-06-16 23:39:50 +03:00
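For reference, a minimal sketch of re-applying the filter once the config has been parsed, assuming yuzu's Common::Log::Filter / SetGlobalFilter API (names drawn from the codebase, not from this diff):

// Sketch only: re-apply the log filter after the config file has been read.
#include "common/logging/filter.h"
#include "common/settings.h"

static void ReapplyLogFilter() {
    Common::Log::Filter filter;
    // Parse the filter string that was just loaded from the config file.
    filter.ParseFilterString(Settings::values.log_filter.GetValue());
    // Replace the default filter the logging backend was initialized with.
    Common::Log::SetGlobalFilter(filter);
}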
liamwhite
561f5c9c14 Merge pull request #8473 from DCNick3/implement-exit-process
Implement ExitProcess svc
2022-06-16 15:45:02 -04:00
Nikita Strygin
cf7e4bda92 Implement ExitProcess svc
Currently this just stops all emulation.
This works under the assumption that only the application will use
ExitProcess, with services not touching it.
If the application exits, it makes sense to end the emulation.
2022-06-16 21:35:34 +03:00
Liam
208ed712f4 core/debugger: memory breakpoint support 2022-06-16 13:18:07 -04:00
Narr the Reg
d1f2f5f146 common: param_package: Demote DEBUG to TRACE for getters 2022-06-16 10:27:59 -05:00
Liam
744a208763 kernel: fix some uses of disable_count 2022-06-15 20:53:49 -04:00
Fernando S
f86b770ff7 Merge pull request #8457 from liamwhite/kprocess-suspend
kernel: implement KProcess suspension
2022-06-16 02:41:12 +02:00
liamwhite
0ae4eae9a6 Merge pull request #8460 from Morph1984/bounded-q
bounded_threadsafe_queue: Use constexpr capacity and mask
2022-06-15 19:39:22 -04:00
Morph
25429998e3 bounded_threadsafe_queue: Use constexpr capacity and mask
While this is the primary change, we also:
- Remove the mpsc namespace and rename Queue to MPSCQueue
- Make Slot a private struct within MPSCQueue
- Remove the AlignedAllocator template argument, as we use std::allocator
- Replace instances of mask + 1 with capacity, and mask + 2 with capacity + 1
2022-06-15 16:59:13 -04:00
bunnei
5ace5c1b7a Merge pull request #8317 from german77/notifa
service: notifa: Implement most of this service
2022-06-15 09:53:50 -07:00
Mai
23514388ed Merge pull request #8464 from liamwhite/break-debug
kernel: notify debugger on break SVC
2022-06-15 11:55:54 -04:00
Mai
f117351783 Merge pull request #8465 from Morph1984/why-msvc
vk_compute_pass: Explicitly cast to VkAccessFlags
2022-06-15 11:55:40 -04:00
Morph
4572634a4e vk_compute_pass: Explicitly cast to VkAccessFlags
According to the standard, a narrowing conversion is an implicit conversion from an integer or unscoped enumeration type to an integer type that cannot represent all the values of the original type, except when the value is a literal or constant expression.
MSVC, unlike GCC or Clang, determines this to be a narrowing conversion despite the enumeration exclusively containing values that fit within the range of a 32 bit integer, emitting a warning since designated initializers prohibit narrowing conversions.
To solve this, explicitly cast to the type we are initializing.
2022-06-15 07:12:16 -04:00
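As a generic, standalone illustration of the warning described above (not the actual vk_compute_pass code; the names only mimic Vulkan's style):

#include <cstdint>

using VkAccessFlags = std::uint32_t; // Vulkan-style flags typedef
enum VkAccessFlagBits {              // unscoped enum whose values all fit in 32 bits
    VK_ACCESS_SHADER_WRITE_BIT = 0x00000040,
};

struct Barrier {
    VkAccessFlags srcAccessMask;
};

// Per the commit message, MSVC treats the enum -> integer conversion as narrowing
// inside a designated initializer, while GCC and Clang accept it:
//     Barrier bad{.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT};
// The chosen fix is to cast explicitly to the type being initialized:
Barrier ok{.srcAccessMask = static_cast<VkAccessFlags>(VK_ACCESS_SHADER_WRITE_BIT)};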
Mai
103997ee56 Merge pull request #8383 from Morph1984/shadow-of-the-past
yuzu: Make variable shadowing a compile-time error
2022-06-14 21:08:58 -04:00
Mai
c9de5474bf Merge pull request #8462 from liamwhite/dynarmic-profile
core: centralize profile scope for Dynarmic
2022-06-14 21:07:47 -04:00
Liam
a7358ff1d4 kernel: notify debugger on break SVC 2022-06-14 21:06:23 -04:00
Liam
20eab9fed9 core: centralize profile scope for Dynarmic 2022-06-14 18:19:04 -04:00
Morph
7620e1a631 externals: Update cpp-httplib to latest 2022-06-14 14:09:51 -04:00
Morph
0eeee431dc main: Eliminate variable shadowing 2022-06-14 14:09:51 -04:00
Liam
888f499188 kernel: implement KProcess suspension 2022-06-14 10:04:11 -04:00
Morph
c6e7ca562a Merge pull request #8461 from Morph1984/msvc-narrow-conv
vk_compute_pass: Use VK_ACCESS_NONE
2022-06-14 09:28:45 -04:00
Morph
a9b4dd022c Merge pull request #8434 from german77/uuid
input_common: Replace usage of string guid to common uuid
2022-06-14 09:28:32 -04:00
Morph
5568763a57 vk_compute_pass: Use VK_ACCESS_NONE
This enumerator was introduced in Vulkan 1.3; prefer using it instead of defaulting the enum.

Also resolves a narrowing conversion warning on MSVC.
2022-06-14 09:14:13 -04:00
Mai
a3b12e3809 Merge pull request #8439 from liamwhite/monkey-compiler
general: fix compilation on GCC 12
2022-06-14 08:34:16 -04:00
Morph
742f021fdf wait_tree: Eliminate variable shadowing 2022-06-14 08:30:09 -04:00
Morph
95bcf6ac38 configure_ringcon: Eliminate variable shadowing 2022-06-14 08:30:09 -04:00
Morph
e371961219 configure_touch_from_button: Eliminate variable shadowing 2022-06-14 08:30:09 -04:00
Morph
5503338f21 configure_per_game: Eliminate variable shadowing 2022-06-14 08:30:08 -04:00
Morph
fe7184c2a8 configure_input_player: Eliminate variable shadowing 2022-06-14 08:30:08 -04:00
Morph
1c83014526 configure_dialog: Eliminate variable shadowing 2022-06-14 08:30:08 -04:00
Morph
2d903e3ce6 bootmanager: Eliminate variable shadowing 2022-06-14 08:30:08 -04:00
Morph
e29e8eec2f game_list: Eliminate variable shadowing 2022-06-14 08:30:07 -04:00
Mai
dc47d0f624 Merge pull request #8459 from Morph1984/wextra-gcc
vk_compute_pass: Silence Wextra warning
2022-06-14 08:22:38 -04:00
Morph
8b55f2c615 externals: microprofileui: Eliminate variable shadowing 2022-06-14 05:52:15 -04:00
Morph
fcfe192e83 vk_compute_pass: Silence Wextra warning
Silences a warning about using enumerated and non-enumerated types in a conditional expression.
2022-06-14 05:29:57 -04:00
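For context, a small standalone example of the -Wextra diagnostic being silenced here (illustrative names, not the actual vk_compute_pass code):

// g++ -Wextra warns: "enumerated and non-enumerated type in conditional expression"
enum ImageCount { ONE_IMAGE = 1, TWO_IMAGES = 2 };

int SelectCount(bool use_two) {
    // Mixing an enumerator and a plain int in ?: triggers the warning:
    //     return use_two ? TWO_IMAGES : 1;
    // Giving both arms the same type silences it:
    return use_two ? static_cast<int>(TWO_IMAGES) : 1;
}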
Liam
bd38aefc57 kernel: fix passthrough of local captures in lambda 2022-06-13 20:09:32 -04:00
Liam
feaf010fa2 common/assert: rework ASSERT handling to avoid std::function usage 2022-06-13 20:09:32 -04:00
Liam
ebecdd3a74 general: fix compilation on MinGW GCC 12 2022-06-13 20:09:32 -04:00
Liam
a29ddcee40 common/assert: add unlikely 2022-06-13 20:09:32 -04:00
Liam
d11547024c general: fix compilation on GCC 12 2022-06-13 20:09:30 -04:00
Liam
6f59e2676b kernel: ensure class token lambda exit is unreachable 2022-06-13 20:09:00 -04:00
Liam
8fea7e56e5 kernel: fix inconsistency in AutoObjectTraits macro definitions 2022-06-13 20:09:00 -04:00
Liam
58fea44eb5 common: Don't test ASSERT conditions inline 2022-06-13 20:09:00 -04:00
Liam
084d7d6b01 common: Change semantics of UNREACHABLE to unconditionally crash 2022-06-13 20:09:00 -04:00
liamwhite
bd3bfe411d Merge pull request #8458 from lat9nq/no-constexpr-flow-block
structured_control_flow: Remove constexpr Flow::Block
2022-06-13 20:06:38 -04:00
lat9nq
963ed37fd6 structured_control_flow: Remove constexpr Flow::Block
This seems to be unsupported in newer libstdc++ versions due to
Flow::Block's base class being a non-literal type. It's not clear to me
why this was permitted in earlier versions.
2022-06-13 19:18:20 -04:00
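A minimal illustration of the language rule involved; the types below are made up and only stand in for Flow::Block and its base class:

// A constexpr variable must have a literal type, and a class with a
// non-literal base class is itself non-literal.
struct NonLiteralBase {
    NonLiteralBase() {}  // non-constexpr constructor...
    ~NonLiteralBase() {} // ...and non-trivial, non-constexpr destructor -> non-literal type
};

struct Block : NonLiteralBase {
    int id{};
};

// constexpr Block kEntry{}; // ill-formed: 'Block' is not a literal type
const Block kEntry{};        // fine: an ordinary runtime-initialized constant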
bunnei
741da9c8bf Merge pull request #8388 from liamwhite/simpler-pause
CpuManager: simplify pausing
2022-06-13 15:48:03 -07:00
Morph
69d92a19a5 yuzu_cmd: Eliminate variable shadowing 2022-06-13 18:19:23 -04:00
Morph
8671aa8dd0 audio_core: Remove -Werror=unused-parameter
Removing this as we don't enforce unused parameter warnings elsewhere in the project, and explicitly specify -Wno-unused-parameter in the main CMakeLists.
2022-06-13 18:19:23 -04:00
Morph
efc89c032b CMakeLists: Make variable shadowing a compile-time error
Now that the entire project is free of variable shadowing, we can enforce this as a compile time error to prevent any further introduction of this logic bug.
2022-06-13 18:19:23 -04:00
Morph
d0328f49f1 externals: microprofile: Eliminate variable shadowing 2022-06-13 18:19:23 -04:00
Morph
c1bd602e4c common: Eliminate variable shadowing
GCC and Clang treat variables within lambdas as potentially shadowing those outside the lambda, even when they are not captured in the lambda's capture list.
2022-06-13 18:19:22 -04:00
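A small standalone example of the kind of shadowing these commits remove, including the lambda case noted above (illustrative names only):

#include <algorithm>
#include <vector>

int CountPositive(const std::vector<int>& values) {
    int count = 0;
    for (const int value : values) {
        if (value > 0) {
            int count = 1; // -Wshadow / MSVC C4456: hides the outer 'count' (likely meant ++count)
            (void)count;
        }
    }
    // Per the commit message above, GCC/Clang can also flag names declared inside a
    // lambda that shadow outer variables, even when those variables are not captured:
    std::for_each(values.begin(), values.end(), [](int count) { (void)count; });
    return count;
}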
Morph
b3d6f7bdd8 yuzu: Eliminate variable shadowing 2022-06-13 18:19:22 -04:00
Morph
12156b199a web_service: Eliminate variable shadowing 2022-06-13 18:19:22 -04:00
Morph
a0407a8e64 Merge pull request #8446 from liamwhite/cmd-gdb
core/debugger: support operation in yuzu-cmd
2022-06-13 14:38:37 -04:00
Morph
7582717c9d Merge pull request #8454 from liamwhite/inaddr-any
core/debugger: allow remote connections
2022-06-13 14:38:20 -04:00
bunnei
ec85eac3c9 Merge pull request #8443 from liamwhite/code-mem
kernel: fix KCodeMemory initialization
2022-06-13 11:32:27 -07:00
Liam
fb4b507ba4 core/debugger: allow remote connections 2022-06-12 11:50:50 -04:00
liamwhite
7ea78699a1 Merge pull request #8450 from lioncash/undef
gdbstub_arch: Add missing virtual destructor
2022-06-11 19:59:18 -04:00
Lioncash
80ad90651e gdbstub_arch: Add missing virtual destructor
The class is used polymorphically, so it's undefined behavior to delete
instances of GDBStubA64 and GDBStubA32 from the base class pointer.
2022-06-11 18:23:22 -04:00
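A generic sketch of why the base class needs a virtual destructor (illustrative members; not the actual gdbstub_arch code):

#include <memory>

struct GDBStubArchBase {
    virtual ~GDBStubArchBase() = default; // without this, deleting through a base pointer is UB
    virtual int RegisterCount() const = 0;
};

struct GDBStubA64 final : GDBStubArchBase {
    int RegisterCount() const override { return 34; } // arbitrary value for the sketch
};

int main() {
    std::unique_ptr<GDBStubArchBase> arch = std::make_unique<GDBStubA64>();
    // The derived destructor now runs correctly when the unique_ptr destroys the object.
    return arch->RegisterCount();
}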
Mai M
b94739cfa7 Merge pull request #8353 from Docteh/msvc_report_runtime
log the MSVC runtime version when running on MSVC build
2022-06-11 13:21:23 -04:00
Mai M
89e00c442d Merge pull request #8427 from Docteh/deprecate_qdesktop
deprecate usage of QDesktopWidget for going fullscreen
2022-06-11 13:20:36 -04:00
Mai M
d796341d33 Merge pull request #8449 from Docteh/translate_placeholder
retranslate the game list placeholder
2022-06-11 13:19:18 -04:00
bunnei
5282efac1b Merge pull request #8413 from behunin/bounded-queue
gpu_thread: Move to bounded queue
2022-06-11 00:07:18 -07:00
bunnei
ae83d5c6d3 Merge pull request #8393 from lat9nq/default-vulkan
general: Set renderer_backend's default to Vulkan
2022-06-11 00:06:59 -07:00
Kyle Kienapfel
3370546a7a log the MSVC runtime version when running on MSVC build
This might be useful information, not 100% sure.

[   0.958068] Frontend <Info> yuzu\main.cpp:GMainWindow:275: yuzu Version: yuzu Development Build | master-0b9ef3c0b-dirty
[   0.958095] Frontend <Info> yuzu\main.cpp:LogRuntimes:220: MSVC Compiler: 1931 Runtime: 14.32.31326.0
2022-06-10 20:37:47 -07:00
Kyle Kienapfel
2ff606628c UI: retranslate the game list placeholder
This is the "Double-click to add a new folder to the game list" message
that shows up when users first launch yuzu and is most likely never seen
again. Previously this message was not re-translated.
2022-06-10 20:15:52 -07:00
Mai M
20576ebb43 Merge pull request #8405 from Docteh/dock_undock
ui: Status bars dock button becomes DOCKED/HANDHELD button
2022-06-10 23:11:29 -04:00
Mai M
6f81160160 Merge pull request #8333 from Docteh/translate_hotkeys
UI: Translate hotkey labels in configuration
2022-06-10 23:10:28 -04:00
Mai M
266e086706 Merge pull request #8318 from Docteh/cmake-qt56-entry
Update some files with Qt 5.15.2 best practices in mind
2022-06-10 23:09:49 -04:00
Mai M
9561a2f5b1 Merge pull request #8448 from german77/gesturetypo
service: hid: Fix gesture regression
2022-06-10 15:09:22 -04:00
Narr the Reg
bc8699a9fa service: hid: Fix gesture regression 2022-06-10 13:14:31 -05:00
Liam
c3cc65a11e yuzu-cmd: ignore bogus timeouts from SDL 2022-06-10 12:49:18 -04:00
Liam
1f0fee33ed core/debugger: fix a number of shutdown deadlocks 2022-06-10 09:17:12 -04:00
Liam
de6c0defb3 core/debugger: support operation in yuzu-cmd 2022-06-10 09:11:02 -04:00
Liam
6c659c3a16 kernel: fix KCodeMemory initialization 2022-06-09 12:33:28 -04:00
Liam
af022294dd CpuManager: simplify pausing 2022-06-08 21:47:29 -04:00
bunnei
073714a762 Merge pull request #8428 from bunnei/nvflinger-fix-timing
Follow-up fixes for NVFlinger rewrite (Part 3)
2022-06-08 11:20:05 -07:00
bunnei
4ae75bec50 Merge pull request #8436 from liamwhite/asio-usage
core/debugger: fix asio write usage
2022-06-07 14:16:47 -07:00
Mai M
31527ccd25 Merge pull request #8435 from liamwhite/lambda-capture
core/debugger: fix crash due to incorrect lambda capture
2022-06-06 23:58:34 -04:00
Liam
268878f895 core/debugger: fix asio write usage 2022-06-06 23:50:56 -04:00
Liam
d00b7be2d6 core/debugger: fix crash due to incorrect lambda capture 2022-06-06 23:39:48 -04:00
Narr the Reg
28877cea31 input_common: Replace usage of string guid to common uuid 2022-06-06 19:56:37 -05:00
Kyle Kienapfel
941b663352 deprecate usage of QDesktopWidget for going fullscreen
The idea works as follows: while going fullscreen, we compare the current window geometry with
the available screens and ask for an intersection rectangle, then go fullscreen on the screen
where most of the window is located

GuessCurrentScreen could also potentially be used to see which screen
the window is on for dynamic DPI handling
2022-06-05 20:18:27 -07:00
bunnei
708e5b027f Merge pull request #8367 from Docteh/say_win11
Logging: Report Post Windows 10 2004 versions, like Windows 11
2022-06-05 18:44:48 -07:00
bunnei
c33c9c76bf Merge pull request #8426 from liamwhite/elf
common: consolidate ELF structure definitions
2022-06-05 16:52:06 -07:00
bunnei
888e814130 hle: service: nvflinger: buffer_queue_consumer: Always free released buffers. 2022-06-05 16:06:06 -07:00
Mai M
cad53179ed Merge pull request #8419 from liamwhite/library-list
gdbstub: add missing library list query
2022-06-05 18:23:29 -04:00
Liam
3c313a43fd common: consolidate ELF structure definitions 2022-06-05 09:42:05 -04:00
bunnei
45bdbf538c Merge pull request #8395 from german77/ir_stub
service: hid: Improve stub of IRS
2022-06-04 01:26:08 -07:00
lat9nq
4544407af6 configure_graphics: Remove unused include 2022-06-04 04:18:21 -04:00
Liam
2f2e443858 gdbstub: add missing library list command 2022-06-03 20:42:13 -04:00
Fernando S
14db101148 Merge pull request #8414 from bylaws/master
Maxwell3D: Fix 3D semaphore counter type 0 handling
2022-06-03 18:02:10 +02:00
Levi Behunin
4dd6bcd206 gpu_thread: Move to bounded queue 2022-06-02 19:37:46 -06:00
Billy Laws
ea89cf8639 Maxwell3D: Fix 3D semaphore counter type 0 handling
Counter type 0 actually releases the semaphore payload rather than a constant zero as was previously thought. This is required by Skyrim.
2022-06-02 21:46:38 +01:00
Mai M
5c0a31e29f Merge pull request #8410 from liamwhite/thread-names
gdbstub: Support reading guest thread names
2022-06-02 16:34:41 -04:00
Liam
07922abffc core/debugger: Support reading guest thread names 2022-06-01 21:25:32 -04:00
Mai M
114a4562ed Merge pull request #8409 from liamwhite/tdesc-fix
gdbstub: fix target descriptions
2022-06-01 21:16:33 -04:00
Morph
858f8ac6d9 Merge pull request #8402 from liamwhite/better-step
core/debugger: Improved stepping mechanism and misc fixes
2022-06-01 20:46:10 -04:00
Liam
b71130e6f1 gdbstub: fix target descriptions 2022-06-01 20:31:24 -04:00
Kyle Kienapfel
054732210e ui: Status bars dock button becomes dock/undock button
For people not used to the Yuzu UI, it's not always clear whether the emulated
console is docked or not. The other items update their text when clicked;
this PR brings the DOCK button in line with them.

DOCK -> DOCKED or HANDHELD
2022-06-01 17:22:53 -07:00
bunnei
af418eb666 Merge pull request #8400 from Docteh/fullscreen_glitch
fix UI opening fullscreen after certain crashes
2022-06-01 10:26:24 -07:00
liamwhite
a2f6a2480d Merge pull request #8404 from Morph1984/virtual
core/debugger: Define defaulted virtual destructors
2022-06-01 12:30:47 -04:00
liamwhite
503feba7e4 Merge pull request #8403 from Morph1984/cast
gdbstub: Explicitly cast return type to u8
2022-06-01 12:30:32 -04:00
Morph
69511aed3d core/debugger: Define defaulted virtual destructors
Resolves an MSVC warning where a virtual destructor is not defined in the base class with virtual functions.
2022-06-01 02:28:34 -04:00
Liam
989d4a7a41 core/debugger: Improved stepping mechanism and misc fixes 2022-06-01 02:15:15 -04:00
Morph
a32f6e9d8e gdbstub: Explicitly cast return type to u8
Otherwise, the addition promotes the returned value to an int instead of keeping it as a u8.
2022-06-01 01:40:18 -04:00
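A standalone illustration of the integer promotion described above:

#include <cstdint>

using u8 = std::uint8_t;

u8 Sum(u8 a, u8 b) {
    // 'a + b' promotes both operands to int, so the expression's type is int;
    // casting the result back to u8 keeps the return type (and the intent) explicit.
    return static_cast<u8>(a + b);
}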
Kyle Kienapfel
36df3ce97e fix UI opening fullscreen after certain crashes
Sometimes when yuzu crashes, it restarts with the game list in fullscreen,
which would be fine, except there isn't an easy way to exit it.
It also doesn't occur often enough for qt-config.ini files to be in good supply.

The UILayout\geometry value in qt-config.ini is the culprit,
at least for the one provided.

The proposed fix is to simply check isFullScreen when yuzu is starting up,
and take it out of fullscreen immediately
2022-05-31 21:24:31 -07:00
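A minimal Qt sketch of the described fix; the method name and its placement in the startup path are assumptions, not the actual yuzu code:

#include <QMainWindow>

class GMainWindow : public QMainWindow {
public:
    void RestoreUIState() {
        // ... restoreGeometry()/restoreState() from the saved qt-config.ini ...
        if (isFullScreen()) {
            // The saved geometry claims fullscreen; drop back to a normal window
            // so the user isn't stuck without an obvious way to exit it.
            showNormal();
        }
    }
};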
Mai M
de2f2e5140 Merge pull request #8394 from liamwhite/debugger
core/debugger: Implement new GDB stub debugger
2022-06-01 00:19:49 -04:00
Liam
fb4b3c127f core/debugger: Implement new GDB stub debugger 2022-06-01 00:01:25 -04:00
Mai M
72b34650f9 Merge pull request #8401 from zhaobot/tx-update-20220601034505
Update translations (2022-06-01)
2022-05-31 23:53:39 -04:00
Narr the Reg
e609bc1c6a service: hid: Improve stub of IRS 2022-05-31 10:26:13 -05:00
lat9nq
422525e3fb main: Insert warning text on broken Vulkan
Co-authored-by: Schplee <24275329+Schplee@users.noreply.github.com>
2022-05-30 10:58:19 -04:00
lat9nq
2dafb27055 main: Save config on broken Vulkan detect
Prevents possible issues if someone were to open yuzu repeatedly.
2022-05-30 10:58:19 -04:00
lat9nq
500b01076e yuzu-qt: Make has_broken_vulkan only for crashes
Being able to catch and handle a Vulkan exception is not what this is
for.
2022-05-30 10:58:18 -04:00
lat9nq
b43ae9d5ed vulkan_library: Add debug logging 2022-05-30 10:57:59 -04:00
lat9nq
f22867efc5 yuzu-qt: Attempt to workaround broken Vulkan installations
This does a few things in order to make Vulkan workable as the default
setting; a sketch of the sentinel-file check follows this entry.

- When yuzu boots, it just opens the Vulkan library.
  - If it works, all good and we continue with Vulkan as the default.
  - If something breaks, a new file in the config directory will be left
    behind (this is deleted normally).
- If Vulkan is not working, has_broken_vulkan is set to true.
  - The first time this happens, a warning is displayed to notify the
    user.
  - This forces use of OpenGL, and Vulkan cannot be selected.
  - The Shader Backend selector is made accessible for use in custom
    configurations.
  - To disable has_broken_vulkan, the user needs to press a button in
    Graphics Configuration to manually run the Vulkan device
    enumeration.
2022-05-30 10:57:59 -04:00
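A sketch of the sentinel-file check mentioned above, with made-up helper and file names (the real implementation differs in detail):

#include <filesystem>
#include <fstream>
#include <system_error>

namespace fs = std::filesystem;

// Returns true only when a previous probe crashed, matching the
// "only for crashes" behavior described in the commits above.
static bool HasBrokenVulkan(const fs::path& config_dir, void (*try_load_vulkan)()) {
    const fs::path marker = config_dir / "vulkan_probe_in_progress"; // hypothetical file name
    std::error_code ec;
    if (fs::exists(marker, ec)) {
        // A previous run never removed its marker: the process most likely crashed
        // while opening the Vulkan library, so treat Vulkan as broken.
        return true;
    }
    { std::ofstream stamp{marker}; } // leave the marker before touching the Vulkan library
    try_load_vulkan();               // if this crashes, the marker survives to the next launch
    fs::remove(marker, ec);          // normal path: the marker is deleted again
    return false;
}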
lat9nq
67fa743414 default_ini: Reflect new renderer backend default setting 2022-05-29 21:38:36 -04:00
lat9nq
5799fa4d7d settings: Set Vulkan to the default renderer backend 2022-05-29 21:38:36 -04:00
Kyle K
499c89790b motion touch ui: move remaining connection out of .ui file
Two reasons for this:
1. Out of 7 connections, 6 are in ConfigureMotionTouch::ConnectEvents;
   this one is the outlier.
2. Qt6 doesn't moc the connection properly
2022-05-29 18:37:38 -07:00
Kyle K
75bf2c20eb Update some files with Qt 5.15.2 best practices in mind
There was some discussion about updating to Qt6, and I figured I would
work on some smaller parts. On the Windows platform, the WinMain function has moved
from Qt5::WinMain to a new one called Qt6::EntryPointPrivate

Also, Qt5 supports versionless CMake targets:
https://www.qt.io/blog/versionless-cmake-targets-qt-5.15

The other changes in this commit are there to support Qt6, but in ways that don't interfere with Qt5.

src/yuzu/bootmanager.cpp: Qt6 complains about not knowing whether to use QPoint or QPointF; picking QPoint
src/yuzu/bootmanager.h: Qt6 prefers that QStringList.h be included rather than an empty class definition
src/yuzu/configuration/configure_system.cpp: toULongLong returns an unsigned 64-bit integer, but
   Settings::values.rng_seed is only 32 bits wide
src/yuzu/game_list.cpp: Qt6 returns a different datatype for QStringList.length than Qt5:
   it used to be int, but in Qt6 it's now qsizetype
src/yuzu/loading_screen.cpp: Qt5's docs for QStyleOption.init say to switch to initFrom;
   QStyleOption.init doesn't exist in Qt6
src/yuzu/main.cpp: Another QPointer and QStringList.size; let's standardize on size()
2022-05-29 09:21:52 -07:00
Kyle K
017a18f42e Logging: Report Post Windows 10 2004 versions, like Windows 11
Qt5 and Qt6 don't really do a good job of reporting Windows versions past the 2004 version.

Current: Windows 10 Version 2009
This Patch: Windows 10 Version 21H1 (Build 19043.1706)
Also: Windows 11 Version 21H2 (Build 22000.675)

Fixes: #8362
2022-05-28 20:48:19 -07:00
Kyle K
669a9a644d UI: Translate hotkey labels in configuration
Another request from GillianMC.

The translated strings have been placed in a separate "Hotkeys" context as an alternative
to having to add the tr function to the Config class, or adding them to the ConfigureHotkeys
context, which is quite long. The English strings get attached to the items in the Action
column as "data" and are used for RetranslateUI and for saving the hotkey configuration.
2022-05-18 22:05:19 -07:00
german77
cc6a4bedfc service: notifa: Implement most of this service
Partially implements RegisterAlarmSetting, UpdateAlarmSetting, LoadApplicationParameter, and DeleteAlarmSetting.
Needed for `Fitness Boxing 2: Rhythm & Exercise` and `Ring Fit Adventure`.
2022-05-09 10:28:04 -05:00
203 changed files with 5015 additions and 1332 deletions

View File

@@ -1,12 +1,27 @@
#!/bin/bash -ex
set -e
cd /yuzu
ccache -s
mkdir build || true && cd build
cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWCross.cmake" -DUSE_CCACHE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_QT_TRANSLATION=ON
ninja
LDFLAGS="-fuse-ld=lld"
# -femulated-tls required due to an incompatibility between GCC and Clang
# TODO(lat9nq): If this is widespread, we probably need to add this to CMakeLists where appropriate
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS="-femulated-tls" \
-DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWClangCross.cmake" \
-DDISPLAY_VERSION=$1 \
-DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON \
-DENABLE_QT_TRANSLATION=ON \
-DUSE_CCACHE=ON \
-DYUZU_USE_BUNDLED_SDL2=OFF \
-DYUZU_USE_EXTERNAL_SDL2=OFF \
-GNinja
ninja yuzu yuzu-cmd
ccache -s

View File

@@ -222,6 +222,11 @@ else()
list(APPEND CONAN_REQUIRED_LIBS "boost/1.79.0")
endif()
# boost:asio has functions that require AcceptEx et al
if (MINGW)
find_library(MSWSOCK_LIBRARY mswsock REQUIRED)
endif()
# Attempt to locate any packages that are required and report the missing ones in CONAN_REQUIRED_LIBS
yuzu_find_packages()

View File

@@ -0,0 +1,55 @@
set(MINGW_PREFIX /usr/x86_64-w64-mingw32/)
set(CMAKE_SYSTEM_NAME Windows)
set(CMAKE_SYSTEM_PROCESSOR x86_64)
set(CMAKE_FIND_ROOT_PATH ${MINGW_PREFIX})
set(SDL2_PATH ${MINGW_PREFIX})
set(MINGW_TOOL_PREFIX ${CMAKE_SYSTEM_PROCESSOR}-w64-mingw32-)
# Specify the cross compiler
set(CMAKE_C_COMPILER ${MINGW_TOOL_PREFIX}clang)
set(CMAKE_CXX_COMPILER ${MINGW_TOOL_PREFIX}clang++)
set(CMAKE_RC_COMPILER ${MINGW_TOOL_PREFIX}windres)
set(CMAKE_C_COMPILER_AR ${MINGW_TOOL_PREFIX}ar)
set(CMAKE_CXX_COMPILER_AR ${MINGW_TOOL_PREFIX}ar)
set(CMAKE_C_COMPILER_RANLIB ${MINGW_TOOL_PREFIX}ranlib)
set(CMAKE_CXX_COMPILER_RANLIB ${MINGW_TOOL_PREFIX}ranlib)
# Mingw tools
set(STRIP ${MINGW_TOOL_PREFIX}strip)
set(WINDRES ${MINGW_TOOL_PREFIX}windres)
set(ENV{PKG_CONFIG} ${MINGW_TOOL_PREFIX}pkg-config)
# ccache wrapper
option(USE_CCACHE "Use ccache for compilation" OFF)
if(USE_CCACHE)
find_program(CCACHE ccache)
if(CCACHE)
message(STATUS "Using ccache found in PATH")
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE})
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE})
else(CCACHE)
message(WARNING "USE_CCACHE enabled, but no ccache found")
endif(CCACHE)
endif(USE_CCACHE)
# Search for programs in the build host directories
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# Echo modified cmake vars to screen for debugging purposes
if(NOT DEFINED ENV{MINGW_DEBUG_INFO})
message("")
message("Custom cmake vars: (blank = system default)")
message("-----------------------------------------")
message("* CMAKE_C_COMPILER : ${CMAKE_C_COMPILER}")
message("* CMAKE_CXX_COMPILER : ${CMAKE_CXX_COMPILER}")
message("* CMAKE_RC_COMPILER : ${CMAKE_RC_COMPILER}")
message("* WINDRES : ${WINDRES}")
message("* ENV{PKG_CONFIG} : $ENV{PKG_CONFIG}")
message("* STRIP : ${STRIP}")
message("* USE_CCACHE : ${USE_CCACHE}")
message("")
# So that the debug info only appears once
set(ENV{MINGW_DEBUG_INFO} SHOWN)
endif()

View File

@@ -58,6 +58,19 @@ QPushButton#GPUStatusBarButton:!checked {
color: #109010;
}
QPushButton#DockingStatusBarButton {
min-width: 0px;
color: #000000;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#DockingStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#buttonRefreshDevices {
min-width: 21px;
min-height: 21px;

View File

@@ -1304,6 +1304,19 @@ QPushButton#GPUStatusBarButton:!checked {
color: #40dd40;
}
QPushButton#DockingStatusBarButton {
min-width: 0px;
color: #ffffff;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#DockingStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#buttonRefreshDevices {
min-width: 23px;
min-height: 23px;

View File

@@ -2207,6 +2207,19 @@ QPushButton#GPUStatusBarButton:!checked {
color: #40dd40;
}
QPushButton#DockingStatusBarButton {
min-width: 0px;
color: #ffffff;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#DockingStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#buttonRefreshDevices {
min-width: 19px;
min-height: 19px;

View File

@@ -40,6 +40,11 @@ target_include_directories(mbedtls PUBLIC ./mbedtls/include)
add_library(microprofile INTERFACE)
target_include_directories(microprofile INTERFACE ./microprofile)
# GCC bugs
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "12" AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND MINGW)
target_compile_options(microprofile INTERFACE "-Wno-array-bounds")
endif()
# libusb
if (NOT LIBUSB_FOUND OR YUZU_USE_BUNDLED_LIBUSB)
add_subdirectory(libusb)

View File

@@ -1246,7 +1246,7 @@ struct MicroProfileScopeLock
{
bool bUseLock;
std::recursive_mutex& m;
MicroProfileScopeLock(std::recursive_mutex& m) : bUseLock(g_bUseLock), m(m)
MicroProfileScopeLock(std::recursive_mutex& m_) : bUseLock(g_bUseLock), m(m_)
{
if(bUseLock)
m.lock();

View File

@@ -213,8 +213,8 @@ struct MicroProfileCustom
struct SOptionDesc
{
SOptionDesc(){}
SOptionDesc(uint8_t nSubType, uint8_t nIndex, const char* fmt, ...):nSubType(nSubType), nIndex(nIndex)
SOptionDesc()=default;
SOptionDesc(uint8_t nSubType_, uint8_t nIndex_, const char* fmt, ...):nSubType(nSubType_), nIndex(nIndex_)
{
va_list args;
va_start (args, fmt);
@@ -573,10 +573,10 @@ inline void MicroProfileToolTipMeta(MicroProfileStringArray* pToolTip)
}
else
{
for(int i = 0; i < MICROPROFILE_META_MAX; ++i)
for(int k = 0; k < MICROPROFILE_META_MAX; ++k)
{
nMetaSumInclusive[i] += nMetaSum[i];
nMetaSum[i] = 0;
nMetaSumInclusive[k] += nMetaSum[k];
nMetaSum[k] = 0;
}
}
break;
@@ -708,10 +708,10 @@ inline void MicroProfileDrawFloatTooltip(uint32_t nX, uint32_t nY, uint32_t nTok
if(UI.nMouseLeftMod)
{
int nIndex = (g_MicroProfileUI.LockedToolTipFront + MICROPROFILE_TOOLTIP_MAX_LOCKED - 1) % MICROPROFILE_TOOLTIP_MAX_LOCKED;
g_MicroProfileUI.nLockedToolTipColor[nIndex] = S.TimerInfo[nTimerId].nColor;
MicroProfileStringArrayCopy(&g_MicroProfileUI.LockedToolTips[nIndex], &ToolTip);
g_MicroProfileUI.LockedToolTipFront = nIndex;
int nToolTipIndex = (g_MicroProfileUI.LockedToolTipFront + MICROPROFILE_TOOLTIP_MAX_LOCKED - 1) % MICROPROFILE_TOOLTIP_MAX_LOCKED;
g_MicroProfileUI.nLockedToolTipColor[nToolTipIndex] = S.TimerInfo[nTimerId].nColor;
MicroProfileStringArrayCopy(&g_MicroProfileUI.LockedToolTips[nToolTipIndex], &ToolTip);
g_MicroProfileUI.LockedToolTipFront = nToolTipIndex;
}
}
@@ -917,9 +917,8 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
float fStart = floor(fMsBase*fRcpStep) * fStep;
for(float f = fStart; f < fMsEnd; )
{
float fStart = f;
float fNext = f + fStep;
MicroProfileDrawBox(((fStart-fMsBase) * fMsToScreen), nBaseY, (fNext-fMsBase) * fMsToScreen+1, nBaseY + nHeight, UI.nOpacityBackground | g_nMicroProfileBackColors[nColorIndex++ & 1]);
MicroProfileDrawBox(((f-fMsBase) * fMsToScreen), nBaseY, (fNext-fMsBase) * fMsToScreen+1, nBaseY + nHeight, UI.nOpacityBackground | g_nMicroProfileBackColors[nColorIndex++ & 1]);
f = fNext;
}
}
@@ -1116,9 +1115,9 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
nMaxStackDepth = MicroProfileMax(nMaxStackDepth, nStackPos);
float fMsStart = fToMs * MicroProfileLogTickDifference(nBaseTicks, nTickStart);
float fMsEnd = fToMs * MicroProfileLogTickDifference(nBaseTicks, nTickEnd);
float fMsEnd2 = fToMs * MicroProfileLogTickDifference(nBaseTicks, nTickEnd);
float fXStart = fMsStart * fMsToScreen;
float fXEnd = fMsEnd * fMsToScreen;
float fXEnd = fMsEnd2 * fMsToScreen;
float fYStart = (float)(nY + nStackPos * nYDelta);
float fYEnd = fYStart + (MICROPROFILE_DETAILED_BAR_HEIGHT);
float fXDist = MicroProfileMax(fXStart - fMouseX, fMouseX - fXEnd);
@@ -1269,22 +1268,22 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
if(UI.nRangeBegin != UI.nRangeEnd)
{
float fMsStart = fToMsCpu * MicroProfileLogTickDifference(nBaseTicksCpu, UI.nRangeBegin);
float fMsEnd = fToMsCpu * MicroProfileLogTickDifference(nBaseTicksCpu, UI.nRangeEnd);
float fMsEnd3 = fToMsCpu * MicroProfileLogTickDifference(nBaseTicksCpu, UI.nRangeEnd);
float fXStart = fMsStart * fMsToScreen;
float fXEnd = fMsEnd * fMsToScreen;
float fXEnd = fMsEnd3 * fMsToScreen;
MicroProfileDrawBox(fXStart, nBaseY, fXEnd, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT, MicroProfileBoxTypeFlat);
MicroProfileDrawLineVertical(fXStart, nBaseY, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT | 0x44000000);
MicroProfileDrawLineVertical(fXEnd, nBaseY, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT | 0x44000000);
fMsStart += fDetailedOffset;
fMsEnd += fDetailedOffset;
fMsEnd3 += fDetailedOffset;
char sBuffer[32];
uint32_t nLenStart = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsStart);
float fStartTextWidth = (float)((1+MICROPROFILE_TEXT_WIDTH) * nLenStart);
float fStartTextX = fXStart - fStartTextWidth - 2;
MicroProfileDrawBox(fStartTextX, nBaseY, fStartTextX + fStartTextWidth + 2, MICROPROFILE_TEXT_HEIGHT + 2 + nBaseY, 0x33000000, MicroProfileBoxTypeFlat);
MicroProfileDrawText(fStartTextX+1, nBaseY, UINT32_MAX, sBuffer, nLenStart);
uint32_t nLenEnd = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsEnd);
uint32_t nLenEnd = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsEnd3);
MicroProfileDrawBox(fXEnd+1, nBaseY, fXEnd+1+(1+MICROPROFILE_TEXT_WIDTH) * nLenEnd + 3, MICROPROFILE_TEXT_HEIGHT + 2 + nBaseY, 0x33000000, MicroProfileBoxTypeFlat);
MicroProfileDrawText(fXEnd+2, nBaseY+1, UINT32_MAX, sBuffer, nLenEnd);
@@ -1297,9 +1296,9 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
if(UI.nRangeBeginGpu != UI.nRangeEndGpu)
{
float fMsStart = fToMsGpu * MicroProfileLogTickDifference(nBaseTicksGpu, UI.nRangeBeginGpu);
float fMsEnd = fToMsGpu * MicroProfileLogTickDifference(nBaseTicksGpu, UI.nRangeEndGpu);
float fMsEnd4 = fToMsGpu * MicroProfileLogTickDifference(nBaseTicksGpu, UI.nRangeEndGpu);
float fXStart = fMsStart * fMsToScreen;
float fXEnd = fMsEnd * fMsToScreen;
float fXEnd = fMsEnd4 * fMsToScreen;
MicroProfileDrawBox(fXStart, nBaseY, fXEnd, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT_GPU, MicroProfileBoxTypeFlat);
MicroProfileDrawLineVertical(fXStart, nBaseY, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT_GPU | 0x44000000);
MicroProfileDrawLineVertical(fXEnd, nBaseY, nHeight, MICROPROFILE_FRAME_COLOR_HIGHTLIGHT_GPU | 0x44000000);
@@ -1307,14 +1306,14 @@ inline void MicroProfileDrawDetailedBars(uint32_t nWidth, uint32_t nHeight, int
nBaseY += MICROPROFILE_TEXT_HEIGHT+1;
fMsStart += fDetailedOffset;
fMsEnd += fDetailedOffset;
fMsEnd4 += fDetailedOffset;
char sBuffer[32];
uint32_t nLenStart = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsStart);
float fStartTextWidth = (float)((1+MICROPROFILE_TEXT_WIDTH) * nLenStart);
float fStartTextX = fXStart - fStartTextWidth - 2;
MicroProfileDrawBox(fStartTextX, nBaseY, fStartTextX + fStartTextWidth + 2, MICROPROFILE_TEXT_HEIGHT + 2 + nBaseY, 0x33000000, MicroProfileBoxTypeFlat);
MicroProfileDrawText(fStartTextX+1, nBaseY, UINT32_MAX, sBuffer, nLenStart);
uint32_t nLenEnd = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsEnd);
uint32_t nLenEnd = snprintf(sBuffer, sizeof(sBuffer)-1, "%.2fms", fMsEnd4);
MicroProfileDrawBox(fXEnd+1, nBaseY, fXEnd+1+(1+MICROPROFILE_TEXT_WIDTH) * nLenEnd + 3, MICROPROFILE_TEXT_HEIGHT + 2 + nBaseY, 0x33000000, MicroProfileBoxTypeFlat);
MicroProfileDrawText(fXEnd+2, nBaseY+1, UINT32_MAX, sBuffer, nLenEnd);
}
@@ -1716,8 +1715,8 @@ bool MicroProfileDrawGraph(uint32_t nScreenWidth, uint32_t nScreenHeight)
uint32_t nTextCount = 0;
uint32_t nGraphIndex = (S.nGraphPut + MICROPROFILE_GRAPH_HISTORY - int(MICROPROFILE_GRAPH_HISTORY*(1.f - fMouseXPrc))) % MICROPROFILE_GRAPH_HISTORY;
uint32_t nX = UI.nMouseX;
uint32_t nY = UI.nMouseY + 20;
uint32_t nMouseX = UI.nMouseX;
uint32_t nMouseY = UI.nMouseY + 20;
for(uint32_t i = 0; i < MICROPROFILE_MAX_GRAPHS; ++i)
{
@@ -1736,7 +1735,7 @@ bool MicroProfileDrawGraph(uint32_t nScreenWidth, uint32_t nScreenHeight)
}
if(nTextCount)
{
MicroProfileDrawFloatWindow(nX, nY, Strings.ppStrings, Strings.nNumStrings, 0, pColors);
MicroProfileDrawFloatWindow(nMouseX, nMouseY, Strings.ppStrings, Strings.nNumStrings, 0, pColors);
}
if(UI.nMouseRight)
@@ -2321,8 +2320,8 @@ inline void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
uint32_t nMenuX[MICROPROFILE_MENU_MAX] = {0};
uint32_t nNumMenuItems = 0;
int nLen = snprintf(buffer, 127, "MicroProfile");
MicroProfileDrawText(nX, nY, UINT32_MAX, buffer, nLen);
int nMPTextLen = snprintf(buffer, 127, "MicroProfile");
MicroProfileDrawText(nX, nY, UINT32_MAX, buffer, nMPTextLen);
nX += (sizeof("MicroProfile")+2) * (MICROPROFILE_TEXT_WIDTH+1);
pMenuText[nNumMenuItems++] = "Mode";
pMenuText[nNumMenuItems++] = "Groups";
@@ -2438,16 +2437,16 @@ inline void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
int nNumLines = 0;
bool bSelected = false;
const char* pString = CB(nNumLines, &bSelected);
uint32_t nWidth = 0, nHeight = 0;
uint32_t nTextWidth = 0, nTextHeight = 0;
while(pString)
{
nWidth = MicroProfileMax<int>(nWidth, (int)strlen(pString));
nTextWidth = MicroProfileMax<int>(nTextWidth, (int)strlen(pString));
nNumLines++;
pString = CB(nNumLines, &bSelected);
}
nWidth = (2+nWidth) * (MICROPROFILE_TEXT_WIDTH+1);
nHeight = nNumLines * (MICROPROFILE_TEXT_HEIGHT+1);
if(UI.nMouseY <= nY + nHeight+0 && UI.nMouseY >= nY-0 && UI.nMouseX <= nX + nWidth + 0 && UI.nMouseX >= nX - 0)
nTextWidth = (2+nTextWidth) * (MICROPROFILE_TEXT_WIDTH+1);
nTextHeight = nNumLines * (MICROPROFILE_TEXT_HEIGHT+1);
if(UI.nMouseY <= nY + nTextHeight+0 && UI.nMouseY >= nY-0 && UI.nMouseX <= nX + nTextWidth + 0 && UI.nMouseX >= nX - 0)
{
UI.nActiveMenu = nMenu;
}
@@ -2455,21 +2454,21 @@ inline void MicroProfileDrawMenu(uint32_t nWidth, uint32_t nHeight)
{
UI.nActiveMenu = UINT32_MAX;
}
MicroProfileDrawBox(nX, nY, nX + nWidth, nY + nHeight, 0xff000000|g_nMicroProfileBackColors[1]);
MicroProfileDrawBox(nX, nY, nX + nTextWidth, nY + nTextHeight, 0xff000000|g_nMicroProfileBackColors[1]);
for(int i = 0; i < nNumLines; ++i)
{
bool bSelected = false;
const char* pString = CB(i, &bSelected);
bool bSelected2 = false;
const char* pString2 = CB(i, &bSelected2);
if(UI.nMouseY >= nY && UI.nMouseY < nY + MICROPROFILE_TEXT_HEIGHT + 1)
{
if(UI.nMouseLeft || UI.nMouseRight)
{
CBClick[nMenu](i);
}
MicroProfileDrawBox(nX, nY, nX + nWidth, nY + MICROPROFILE_TEXT_HEIGHT + 1, 0xff888888);
MicroProfileDrawBox(nX, nY, nX + nTextWidth, nY + MICROPROFILE_TEXT_HEIGHT + 1, 0xff888888);
}
int nLen = snprintf(buffer, SBUF_SIZE-1, "%c %s", bSelected ? '*' : ' ' ,pString);
MicroProfileDrawText(nX, nY, UINT32_MAX, buffer, nLen);
int nTextLen = snprintf(buffer, SBUF_SIZE-1, "%c %s", bSelected2 ? '*' : ' ' ,pString2);
MicroProfileDrawText(nX, nY, UINT32_MAX, buffer, nTextLen);
nY += MICROPROFILE_TEXT_HEIGHT+1;
}
}
@@ -2605,7 +2604,7 @@ inline void MicroProfileDrawCustom(uint32_t nWidth, uint32_t nHeight)
for(uint32_t i = 0; i < nCount; ++i)
{
nOffsetY += (1+MICROPROFILE_TEXT_HEIGHT);
uint32_t nWidth = MicroProfileMin(nMaxWidth, (uint32_t)(nMaxWidth * pMs[i] * fRcpReference));
nWidth = MicroProfileMin(nMaxWidth, (uint32_t)(nMaxWidth * pMs[i] * fRcpReference));
MicroProfileDrawBox(nMaxOffsetX, nOffsetY, nMaxOffsetX+nWidth, nOffsetY+MICROPROFILE_TEXT_HEIGHT, pColors[i]|0xff000000);
}
}

View File

@@ -65,6 +65,10 @@ if (MSVC)
/we4305 # 'context': truncation from 'type1' to 'type2'
/we4388 # 'expression': signed/unsigned mismatch
/we4389 # 'operator': signed/unsigned mismatch
/we4456 # Declaration of 'identifier' hides previous local declaration
/we4457 # Declaration of 'identifier' hides function parameter
/we4458 # Declaration of 'identifier' hides class member
/we4459 # Declaration of 'identifier' hides global declaration
/we4505 # 'function': unreferenced local function has been removed
/we4547 # 'operator': operator before comma has no effect; expected operator with side-effect
/we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
@@ -92,6 +96,7 @@ else()
-Werror=missing-declarations
-Werror=missing-field-initializers
-Werror=reorder
-Werror=shadow
-Werror=sign-compare
-Werror=switch
-Werror=uninitialized

View File

@@ -49,9 +49,6 @@ if (NOT MSVC)
target_compile_options(audio_core PRIVATE
-Werror=conversion
-Werror=ignored-qualifiers
-Werror=shadow
-Werror=unused-parameter
-Werror=unused-variable
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>

View File

@@ -429,7 +429,7 @@ void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, Vo
in_params.node_id);
break;
default:
UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
}
}
}
@@ -1312,7 +1312,7 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, std::s
samples_to_read - samples_read, channel, temp_mix_offset);
break;
default:
UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
}
temp_mix_offset += samples_decoded;

View File

@@ -50,7 +50,7 @@ EffectBase* EffectContext::RetargetEffect(std::size_t i, EffectType effect) {
effects[i] = std::make_unique<EffectBiquadFilter>();
break;
default:
UNREACHABLE_MSG("Unimplemented effect {}", effect);
ASSERT_MSG(false, "Unimplemented effect {}", effect);
effects[i] = std::make_unique<EffectStubbed>();
}
return GetInfo(i);
@@ -104,7 +104,7 @@ void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
auto& params = GetParams();
const auto* reverb_params = reinterpret_cast<I3dl2ReverbParams*>(in_params.raw.data());
if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
UNREACHABLE_MSG("Invalid reverb max channel count {}", reverb_params->max_channels);
ASSERT_MSG(false, "Invalid reverb max channel count {}", reverb_params->max_channels);
return;
}

View File

@@ -483,7 +483,7 @@ bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
// Add more work
index_stack.push(j);
} else if (node_state == NodeStates::State::InFound) {
UNREACHABLE_MSG("Node start marked as found");
ASSERT_MSG(false, "Node start marked as found");
ResetState();
return false;
}

View File

@@ -114,7 +114,7 @@ void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
in_params.current_playstate = ServerPlayState::Play;
break;
default:
UNREACHABLE_MSG("Unknown playstate {}", voice_in.play_state);
ASSERT_MSG(false, "Unknown playstate {}", voice_in.play_state);
break;
}
@@ -410,7 +410,7 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
return in_params.should_depop;
}
default:
UNREACHABLE_MSG("Invalid playstate {}", in_params.current_playstate);
ASSERT_MSG(false, "Invalid playstate {}", in_params.current_playstate);
}
return false;

View File

@@ -58,6 +58,7 @@ add_library(common STATIC
div_ceil.h
dynamic_library.cpp
dynamic_library.h
elf.h
error.cpp
error.h
expected.h

View File

@@ -6,8 +6,13 @@
#include "common/settings.h"
void assert_handle_failure() {
void assert_fail_impl() {
if (Settings::values.use_debug_asserts) {
Crash();
}
}
[[noreturn]] void unreachable_impl() {
Crash();
throw std::runtime_error("Unreachable code");
}

View File

@@ -9,44 +9,43 @@
// Sometimes we want to try to continue even after hitting an assert.
// However touching this file yields a global recompilation as this header is included almost
// everywhere. So let's just move the handling of the failed assert to a single cpp file.
void assert_handle_failure();
// For asserts we'd like to keep all the junk executed when an assert happens away from the
// important code in the function. One way of doing this is to put all the relevant code inside a
// lambda and force the compiler to not inline it. Unfortunately, MSVC seems to have no syntax to
// specify __declspec on lambda functions, so what we do instead is define a noinline wrapper
// template that calls the lambda. This seems to generate an extra instruction at the call-site
// compared to the ideal implementation (which wouldn't support ASSERT_MSG parameters), but is good
// enough for our purposes.
template <typename Fn>
#if defined(_MSC_VER)
[[msvc::noinline]]
#elif defined(__GNUC__)
[[gnu::cold, gnu::noinline]]
void assert_fail_impl();
[[noreturn]] void unreachable_impl();
#ifdef _MSC_VER
#define YUZU_NO_INLINE __declspec(noinline)
#else
#define YUZU_NO_INLINE __attribute__((noinline))
#endif
static void
assert_noinline_call(const Fn& fn) {
fn();
assert_handle_failure();
}
#define ASSERT(_a_) \
do \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
([&]() YUZU_NO_INLINE { \
if (!(_a_)) [[unlikely]] { \
LOG_CRITICAL(Debug, "Assertion Failed!"); \
assert_fail_impl(); \
} \
while (0)
}())
#define ASSERT_MSG(_a_, ...) \
do \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
([&]() YUZU_NO_INLINE { \
if (!(_a_)) [[unlikely]] { \
LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); \
assert_fail_impl(); \
} \
while (0)
}())
#define UNREACHABLE() \
do { \
LOG_CRITICAL(Debug, "Unreachable code!"); \
unreachable_impl(); \
} while (0)
#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
#define UNREACHABLE_MSG(...) \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
do { \
LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); \
unreachable_impl(); \
} while (0)
#ifdef _DEBUG
#define DEBUG_ASSERT(_a_) ASSERT(_a_)

View File

@@ -0,0 +1,167 @@
// SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
// SPDX-License-Identifier: MIT
#pragma once
#include <atomic>
#include <bit>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <new>
#include <stop_token>
#include <type_traits>
#include <utility>
namespace Common {
#if defined(__cpp_lib_hardware_interference_size)
constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
#else
constexpr size_t hardware_interference_size = 64;
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4324)
#endif
template <typename T, size_t capacity = 0x400>
class MPSCQueue {
public:
explicit MPSCQueue() : allocator{std::allocator<Slot<T>>()} {
// Allocate one extra slot to prevent false sharing on the last slot
slots = allocator.allocate(capacity + 1);
// Allocators are not required to honor alignment for over-aligned types
// (see http://eel.is/c++draft/allocator.requirements#10) so we verify
// alignment here
if (reinterpret_cast<uintptr_t>(slots) % alignof(Slot<T>) != 0) {
allocator.deallocate(slots, capacity + 1);
throw std::bad_alloc();
}
for (size_t i = 0; i < capacity; ++i) {
std::construct_at(&slots[i]);
}
static_assert(std::has_single_bit(capacity), "capacity must be an integer power of 2");
static_assert(alignof(Slot<T>) == hardware_interference_size,
"Slot must be aligned to cache line boundary to prevent false sharing");
static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
"Slot size must be a multiple of cache line size to prevent "
"false sharing between adjacent slots");
static_assert(sizeof(MPSCQueue) % hardware_interference_size == 0,
"Queue size must be a multiple of cache line size to "
"prevent false sharing between adjacent queues");
}
~MPSCQueue() noexcept {
for (size_t i = 0; i < capacity; ++i) {
std::destroy_at(&slots[i]);
}
allocator.deallocate(slots, capacity + 1);
}
// The queue must be both non-copyable and non-movable
MPSCQueue(const MPSCQueue&) = delete;
MPSCQueue& operator=(const MPSCQueue&) = delete;
MPSCQueue(MPSCQueue&&) = delete;
MPSCQueue& operator=(MPSCQueue&&) = delete;
void Push(const T& v) noexcept {
static_assert(std::is_nothrow_copy_constructible_v<T>,
"T must be nothrow copy constructible");
emplace(v);
}
template <typename P, typename = std::enable_if_t<std::is_nothrow_constructible_v<T, P&&>>>
void Push(P&& v) noexcept {
emplace(std::forward<P>(v));
}
void Pop(T& v, std::stop_token stop) noexcept {
auto const tail = tail_.fetch_add(1);
auto& slot = slots[idx(tail)];
if (!slot.turn.test()) {
std::unique_lock lock{cv_mutex};
cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
}
v = slot.move();
slot.destroy();
slot.turn.clear();
slot.turn.notify_one();
}
private:
template <typename U = T>
struct Slot {
~Slot() noexcept {
if (turn.test()) {
destroy();
}
}
template <typename... Args>
void construct(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<U, Args&&...>,
"T must be nothrow constructible with Args&&...");
std::construct_at(reinterpret_cast<U*>(&storage), std::forward<Args>(args)...);
}
void destroy() noexcept {
static_assert(std::is_nothrow_destructible_v<U>, "T must be nothrow destructible");
std::destroy_at(reinterpret_cast<U*>(&storage));
}
U&& move() noexcept {
return reinterpret_cast<U&&>(storage);
}
// Align to avoid false sharing between adjacent slots
alignas(hardware_interference_size) std::atomic_flag turn{};
struct aligned_store {
struct type {
alignas(U) unsigned char data[sizeof(U)];
};
};
typename aligned_store::type storage;
};
template <typename... Args>
void emplace(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
"T must be nothrow constructible with Args&&...");
auto const head = head_.fetch_add(1);
auto& slot = slots[idx(head)];
slot.turn.wait(true);
slot.construct(std::forward<Args>(args)...);
slot.turn.test_and_set();
cv.notify_one();
}
constexpr size_t idx(size_t i) const noexcept {
return i & mask;
}
static constexpr size_t mask = capacity - 1;
// Align to avoid false sharing between head_ and tail_
alignas(hardware_interference_size) std::atomic<size_t> head_{0};
alignas(hardware_interference_size) std::atomic<size_t> tail_{0};
std::mutex cv_mutex;
std::condition_variable_any cv;
Slot<T>* slots;
[[no_unique_address]] std::allocator<Slot<T>> allocator;
static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
"T must be nothrow copy or move assignable");
static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
} // namespace Common
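For orientation, a hypothetical usage sketch of the queue defined above; the include path and the producer/consumer wiring are assumptions, not part of this diff:

#include <stop_token>
#include <thread>

#include "common/bounded_threadsafe_queue.h" // assumed path for the header above

int main() {
    Common::MPSCQueue<int> queue; // capacity defaults to 0x400 entries

    // Single consumer: Pop blocks until a value arrives or stop is requested.
    std::jthread consumer([&queue](std::stop_token stop) {
        int value{};
        queue.Pop(value, stop);
    });

    // Any number of producers may Push concurrently.
    queue.Push(42);
    return 0;
}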

View File

@@ -33,9 +33,9 @@ void DetachedTasks::AddTask(std::function<void()> task) {
++instance->count;
std::thread([task{std::move(task)}]() {
task();
std::unique_lock lock{instance->mutex};
std::unique_lock thread_lock{instance->mutex};
--instance->count;
std::notify_all_at_thread_exit(instance->cv, std::move(lock));
std::notify_all_at_thread_exit(instance->cv, std::move(thread_lock));
}).detach();
}

src/common/elf.h (new file, 333 lines)
View File

@@ -0,0 +1,333 @@
// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <cstddef>
#include "common_types.h"
namespace Common {
namespace ELF {
/* Type for a 16-bit quantity. */
using Elf32_Half = u16;
using Elf64_Half = u16;
/* Types for signed and unsigned 32-bit quantities. */
using Elf32_Word = u32;
using Elf32_Sword = s32;
using Elf64_Word = u32;
using Elf64_Sword = s32;
/* Types for signed and unsigned 64-bit quantities. */
using Elf32_Xword = u64;
using Elf32_Sxword = s64;
using Elf64_Xword = u64;
using Elf64_Sxword = s64;
/* Type of addresses. */
using Elf32_Addr = u32;
using Elf64_Addr = u64;
/* Type of file offsets. */
using Elf32_Off = u32;
using Elf64_Off = u64;
/* Type for section indices, which are 16-bit quantities. */
using Elf32_Section = u16;
using Elf64_Section = u16;
/* Type for version symbol information. */
using Elf32_Versym = Elf32_Half;
using Elf64_Versym = Elf64_Half;
constexpr size_t ElfIdentSize = 16;
/* The ELF file header. This appears at the start of every ELF file. */
struct Elf32_Ehdr {
std::array<u8, ElfIdentSize> e_ident; /* Magic number and other info */
Elf32_Half e_type; /* Object file type */
Elf32_Half e_machine; /* Architecture */
Elf32_Word e_version; /* Object file version */
Elf32_Addr e_entry; /* Entry point virtual address */
Elf32_Off e_phoff; /* Program header table file offset */
Elf32_Off e_shoff; /* Section header table file offset */
Elf32_Word e_flags; /* Processor-specific flags */
Elf32_Half e_ehsize; /* ELF header size in bytes */
Elf32_Half e_phentsize; /* Program header table entry size */
Elf32_Half e_phnum; /* Program header table entry count */
Elf32_Half e_shentsize; /* Section header table entry size */
Elf32_Half e_shnum; /* Section header table entry count */
Elf32_Half e_shstrndx; /* Section header string table index */
};
struct Elf64_Ehdr {
std::array<u8, ElfIdentSize> e_ident; /* Magic number and other info */
Elf64_Half e_type; /* Object file type */
Elf64_Half e_machine; /* Architecture */
Elf64_Word e_version; /* Object file version */
Elf64_Addr e_entry; /* Entry point virtual address */
Elf64_Off e_phoff; /* Program header table file offset */
Elf64_Off e_shoff; /* Section header table file offset */
Elf64_Word e_flags; /* Processor-specific flags */
Elf64_Half e_ehsize; /* ELF header size in bytes */
Elf64_Half e_phentsize; /* Program header table entry size */
Elf64_Half e_phnum; /* Program header table entry count */
Elf64_Half e_shentsize; /* Section header table entry size */
Elf64_Half e_shnum; /* Section header table entry count */
Elf64_Half e_shstrndx; /* Section header string table index */
};
constexpr u8 ElfClass32 = 1; /* 32-bit objects */
constexpr u8 ElfClass64 = 2; /* 64-bit objects */
constexpr u8 ElfData2Lsb = 1; /* 2's complement, little endian */
constexpr u8 ElfVersionCurrent = 1; /* EV_CURRENT */
constexpr u8 ElfOsAbiNone = 0; /* System V ABI */
constexpr u16 ElfTypeNone = 0; /* No file type */
constexpr u16 ElfTypeRel = 0; /* Relocatable file */
constexpr u16 ElfTypeExec = 0; /* Executable file */
constexpr u16 ElfTypeDyn = 0; /* Shared object file */
constexpr u16 ElfMachineArm = 40; /* ARM */
constexpr u16 ElfMachineAArch64 = 183; /* ARM AARCH64 */
constexpr std::array<u8, ElfIdentSize> Elf32Ident{
0x7f, 'E', 'L', 'F', ElfClass32, ElfData2Lsb, ElfVersionCurrent, ElfOsAbiNone};
constexpr std::array<u8, ElfIdentSize> Elf64Ident{
0x7f, 'E', 'L', 'F', ElfClass64, ElfData2Lsb, ElfVersionCurrent, ElfOsAbiNone};
/* Section header. */
struct Elf32_Shdr {
Elf32_Word sh_name; /* Section name (string tbl index) */
Elf32_Word sh_type; /* Section type */
Elf32_Word sh_flags; /* Section flags */
Elf32_Addr sh_addr; /* Section virtual addr at execution */
Elf32_Off sh_offset; /* Section file offset */
Elf32_Word sh_size; /* Section size in bytes */
Elf32_Word sh_link; /* Link to another section */
Elf32_Word sh_info; /* Additional section information */
Elf32_Word sh_addralign; /* Section alignment */
Elf32_Word sh_entsize; /* Entry size if section holds table */
};
struct Elf64_Shdr {
Elf64_Word sh_name; /* Section name (string tbl index) */
Elf64_Word sh_type; /* Section type */
Elf64_Xword sh_flags; /* Section flags */
Elf64_Addr sh_addr; /* Section virtual addr at execution */
Elf64_Off sh_offset; /* Section file offset */
Elf64_Xword sh_size; /* Section size in bytes */
Elf64_Word sh_link; /* Link to another section */
Elf64_Word sh_info; /* Additional section information */
Elf64_Xword sh_addralign; /* Section alignment */
Elf64_Xword sh_entsize; /* Entry size if section holds table */
};
constexpr u32 ElfShnUndef = 0; /* Undefined section */
constexpr u32 ElfShtNull = 0; /* Section header table entry unused */
constexpr u32 ElfShtProgBits = 1; /* Program data */
constexpr u32 ElfShtSymtab = 2; /* Symbol table */
constexpr u32 ElfShtStrtab = 3; /* String table */
constexpr u32 ElfShtRela = 4; /* Relocation entries with addends */
constexpr u32 ElfShtDynamic = 6; /* Dynamic linking information */
constexpr u32 ElfShtNobits = 7; /* Program space with no data (bss) */
constexpr u32 ElfShtRel = 9; /* Relocation entries, no addends */
constexpr u32 ElfShtDynsym = 11; /* Dynamic linker symbol table */
/* Symbol table entry. */
struct Elf32_Sym {
Elf32_Word st_name; /* Symbol name (string tbl index) */
Elf32_Addr st_value; /* Symbol value */
Elf32_Word st_size; /* Symbol size */
u8 st_info; /* Symbol type and binding */
u8 st_other; /* Symbol visibility */
Elf32_Section st_shndx; /* Section index */
};
struct Elf64_Sym {
Elf64_Word st_name; /* Symbol name (string tbl index) */
u8 st_info; /* Symbol type and binding */
u8 st_other; /* Symbol visibility */
Elf64_Section st_shndx; /* Section index */
Elf64_Addr st_value; /* Symbol value */
Elf64_Xword st_size; /* Symbol size */
};
/* How to extract and insert information held in the st_info field. */
static inline u8 ElfStBind(u8 st_info) {
return st_info >> 4;
}
static inline u8 ElfStType(u8 st_info) {
return st_info & 0xf;
}
static inline u8 ElfStInfo(u8 st_bind, u8 st_type) {
return static_cast<u8>((st_bind << 4) + (st_type & 0xf));
}
constexpr u8 ElfBindLocal = 0; /* Local symbol */
constexpr u8 ElfBindGlobal = 1; /* Global symbol */
constexpr u8 ElfBindWeak = 2; /* Weak symbol */
constexpr u8 ElfTypeUnspec = 0; /* Symbol type is unspecified */
constexpr u8 ElfTypeObject = 1; /* Symbol is a data object */
constexpr u8 ElfTypeFunc = 2; /* Symbol is a code object */
static inline u8 ElfStVisibility(u8 st_other) {
return static_cast<u8>(st_other & 0x3);
}
constexpr u8 ElfVisibilityDefault = 0; /* Default symbol visibility rules */
constexpr u8 ElfVisibilityInternal = 1; /* Processor specific hidden class */
constexpr u8 ElfVisibilityHidden = 2; /* Sym unavailable in other modules */
constexpr u8 ElfVisibilityProtected = 3; /* Not preemptible, not exported */
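// Illustrative usage, not part of this header: ElfStInfo packs a binding and a
// type into the st_info byte, and ElfStBind/ElfStType recover them, e.g. for a
// global function symbol:
inline bool StInfoRoundTrips() {
    const u8 info = ElfStInfo(ElfBindGlobal, ElfTypeFunc); // == 0x12
    return ElfStBind(info) == ElfBindGlobal && ElfStType(info) == ElfTypeFunc;
}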
/* Relocation table entry without addend (in section of type ShtRel). */
struct Elf32_Rel {
Elf32_Addr r_offset; /* Address */
Elf32_Word r_info; /* Relocation type and symbol index */
};
/* Relocation table entry with addend (in section of type ShtRela). */
struct Elf32_Rela {
Elf32_Addr r_offset; /* Address */
Elf32_Word r_info; /* Relocation type and symbol index */
Elf32_Sword r_addend; /* Addend */
};
struct Elf64_Rela {
Elf64_Addr r_offset; /* Address */
Elf64_Xword r_info; /* Relocation type and symbol index */
Elf64_Sxword r_addend; /* Addend */
};
/* How to extract and insert information held in the r_info field. */
static inline u32 Elf32RelSymIndex(Elf32_Word r_info) {
return r_info >> 8;
}
static inline u8 Elf32RelType(Elf32_Word r_info) {
return static_cast<u8>(r_info & 0xff);
}
static inline Elf32_Word Elf32RelInfo(u32 sym_index, u8 type) {
return (sym_index << 8) + type;
}
static inline u32 Elf64RelSymIndex(Elf64_Xword r_info) {
return static_cast<u32>(r_info >> 32);
}
static inline u32 Elf64RelType(Elf64_Xword r_info) {
return r_info & 0xffffffff;
}
static inline Elf64_Xword Elf64RelInfo(u32 sym_index, u32 type) {
return (static_cast<Elf64_Xword>(sym_index) << 32) + type;
}
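// Illustrative usage, not part of this header: Elf64RelInfo stores the symbol
// index in the upper 32 bits of r_info and the relocation type in the lower
// 32 bits; the two accessors above split them back apart.
inline bool Elf64RelInfoRoundTrips() {
    const Elf64_Xword r_info = Elf64RelInfo(5, 1027); // 1027 == ElfAArch64Relative below
    return Elf64RelSymIndex(r_info) == 5 && Elf64RelType(r_info) == 1027;
}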
constexpr u32 ElfArmCopy = 20; /* Copy symbol at runtime */
constexpr u32 ElfArmGlobDat = 21; /* Create GOT entry */
constexpr u32 ElfArmJumpSlot = 22; /* Create PLT entry */
constexpr u32 ElfArmRelative = 23; /* Adjust by program base */
constexpr u32 ElfAArch64Copy = 1024; /* Copy symbol at runtime */
constexpr u32 ElfAArch64GlobDat = 1025; /* Create GOT entry */
constexpr u32 ElfAArch64JumpSlot = 1026; /* Create PLT entry */
constexpr u32 ElfAArch64Relative = 1027; /* Adjust by program base */
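// Minimal sketch, not part of this header and not yuzu API: how a loader might
// apply R_AARCH64_RELATIVE entries using the types and constants above. Each
// entry rewrites the 64-bit word at image + r_offset to image_base + r_addend.
// Assumes <cstring> and <span> are available; the parameter names are
// illustrative.
inline void ApplyAArch64RelativeRelocs(u8* image, u64 image_base,
                                       std::span<const Elf64_Rela> relas) {
    for (const auto& rela : relas) {
        if (Elf64RelType(rela.r_info) != ElfAArch64Relative) {
            continue;
        }
        const u64 value = image_base + static_cast<u64>(rela.r_addend);
        std::memcpy(image + rela.r_offset, &value, sizeof(value));
    }
}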
/* Program segment header. */
struct Elf32_Phdr {
Elf32_Word p_type; /* Segment type */
Elf32_Off p_offset; /* Segment file offset */
Elf32_Addr p_vaddr; /* Segment virtual address */
Elf32_Addr p_paddr; /* Segment physical address */
Elf32_Word p_filesz; /* Segment size in file */
Elf32_Word p_memsz; /* Segment size in memory */
Elf32_Word p_flags; /* Segment flags */
Elf32_Word p_align; /* Segment alignment */
};
struct Elf64_Phdr {
Elf64_Word p_type; /* Segment type */
Elf64_Word p_flags; /* Segment flags */
Elf64_Off p_offset; /* Segment file offset */
Elf64_Addr p_vaddr; /* Segment virtual address */
Elf64_Addr p_paddr; /* Segment physical address */
Elf64_Xword p_filesz; /* Segment size in file */
Elf64_Xword p_memsz; /* Segment size in memory */
Elf64_Xword p_align; /* Segment alignment */
};
/* Legal values for p_type (segment type). */
constexpr u32 ElfPtNull = 0; /* Program header table entry unused */
constexpr u32 ElfPtLoad = 1; /* Loadable program segment */
constexpr u32 ElfPtDynamic = 2; /* Dynamic linking information */
constexpr u32 ElfPtInterp = 3; /* Program interpreter */
constexpr u32 ElfPtNote = 4; /* Auxiliary information */
constexpr u32 ElfPtPhdr = 6; /* Entry for header table itself */
constexpr u32 ElfPtTls = 7; /* Thread-local storage segment */
/* Legal values for p_flags (segment flags). */
constexpr u32 ElfPfExec = 1; /* Segment is executable */
constexpr u32 ElfPfWrite = 2; /* Segment is writable */
constexpr u32 ElfPfRead = 4; /* Segment is readable */
/* Dynamic section entry. */
struct Elf32_Dyn {
Elf32_Sword d_tag; /* Dynamic entry type */
union {
Elf32_Word d_val; /* Integer value */
Elf32_Addr d_ptr; /* Address value */
} d_un;
};
struct Elf64_Dyn {
Elf64_Sxword d_tag; /* Dynamic entry type */
union {
Elf64_Xword d_val; /* Integer value */
Elf64_Addr d_ptr; /* Address value */
} d_un;
};
/* Legal values for d_tag (dynamic entry type). */
constexpr u32 ElfDtNull = 0; /* Marks end of dynamic section */
constexpr u32 ElfDtNeeded = 1; /* Name of needed library */
constexpr u32 ElfDtPltRelSz = 2; /* Size in bytes of PLT relocs */
constexpr u32 ElfDtPltGot = 3; /* Processor defined value */
constexpr u32 ElfDtHash = 4; /* Address of symbol hash table */
constexpr u32 ElfDtStrtab = 5; /* Address of string table */
constexpr u32 ElfDtSymtab = 6; /* Address of symbol table */
constexpr u32 ElfDtRela = 7; /* Address of Rela relocs */
constexpr u32 ElfDtRelasz = 8; /* Total size of Rela relocs */
constexpr u32 ElfDtRelaent = 9; /* Size of one Rela reloc */
constexpr u32 ElfDtStrsz = 10; /* Size of string table */
constexpr u32 ElfDtSyment = 11; /* Size of one symbol table entry */
constexpr u32 ElfDtInit = 12; /* Address of init function */
constexpr u32 ElfDtFini = 13; /* Address of termination function */
constexpr u32 ElfDtRel = 17; /* Address of Rel relocs */
constexpr u32 ElfDtRelsz = 18; /* Total size of Rel relocs */
constexpr u32 ElfDtRelent = 19; /* Size of one Rel reloc */
constexpr u32 ElfDtPltRel = 20; /* Type of reloc in PLT */
constexpr u32 ElfDtTextRel = 22; /* Reloc might modify .text */
constexpr u32 ElfDtJmpRel = 23; /* Address of PLT relocs */
constexpr u32 ElfDtBindNow = 24; /* Process relocations of object */
constexpr u32 ElfDtInitArray = 25; /* Array with addresses of init fct */
constexpr u32 ElfDtFiniArray = 26; /* Array with addresses of fini fct */
constexpr u32 ElfDtInitArraySz = 27; /* Size in bytes of DT_INIT_ARRAY */
constexpr u32 ElfDtFiniArraySz = 28; /* Size in bytes of DT_FINI_ARRAY */
constexpr u32 ElfDtSymtabShndx = 34; /* Address of SYMTAB_SHNDX section */
} // namespace ELF
} // namespace Common

View File

@@ -15,6 +15,9 @@ enum class PageType : u8 {
Unmapped,
/// Page is mapped to regular memory. This is the only type you can get pointers to.
Memory,
/// Page is mapped to regular memory, but inaccessible from CPU fastmem and must use
/// the callbacks.
DebugMemory,
/// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
/// invalidation
RasterizerCachedMemory,
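// Hedged sketch of what the distinction above implies for a read path; the
// function and helper names are hypothetical, not yuzu's actual memory code:
//   switch (page_type) {
//   case PageType::Memory:
//       return *GetHostPointer(vaddr);      // eligible for the fastmem/pointer path
//   case PageType::DebugMemory:
//   case PageType::RasterizerCachedMemory:
//       return ReadViaCallbacks(vaddr);     // must bypass fastmem and use callbacks
//   default:
//       return HandleUnmappedRead(vaddr);
//   }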

View File

@@ -76,7 +76,7 @@ std::string ParamPackage::Serialize() const {
std::string ParamPackage::Get(const std::string& key, const std::string& default_value) const {
auto pair = data.find(key);
if (pair == data.end()) {
LOG_DEBUG(Common, "key '{}' not found", key);
LOG_TRACE(Common, "key '{}' not found", key);
return default_value;
}
@@ -86,7 +86,7 @@ std::string ParamPackage::Get(const std::string& key, const std::string& default
int ParamPackage::Get(const std::string& key, int default_value) const {
auto pair = data.find(key);
if (pair == data.end()) {
LOG_DEBUG(Common, "key '{}' not found", key);
LOG_TRACE(Common, "key '{}' not found", key);
return default_value;
}
@@ -101,7 +101,7 @@ int ParamPackage::Get(const std::string& key, int default_value) const {
float ParamPackage::Get(const std::string& key, float default_value) const {
auto pair = data.find(key);
if (pair == data.end()) {
LOG_DEBUG(Common, "key {} not found", key);
LOG_TRACE(Common, "key {} not found", key);
return default_value;
}

View File

@@ -70,6 +70,7 @@ void LogSettings() {
log_path("DataStorage_NANDDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir));
log_path("DataStorage_SDMCDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::SDMCDir));
log_setting("Debugging_ProgramArgs", values.program_args.GetValue());
log_setting("Debugging_GDBStub", values.use_gdbstub.GetValue());
log_setting("Input_EnableMotion", values.motion_enabled.GetValue());
log_setting("Input_EnableVibration", values.vibration_enabled.GetValue());
log_setting("Input_EnableRawInput", values.enable_raw_input.GetValue());
@@ -146,7 +147,7 @@ void UpdateRescalingInfo() {
info.down_shift = 0;
break;
default:
UNREACHABLE();
ASSERT(false);
info.up_scale = 1;
info.down_shift = 0;
}

View File

@@ -496,7 +496,7 @@ struct Values {
// Renderer
RangedSetting<RendererBackend> renderer_backend{
RendererBackend::OpenGL, RendererBackend::OpenGL, RendererBackend::Vulkan, "backend"};
RendererBackend::Vulkan, RendererBackend::OpenGL, RendererBackend::Vulkan, "backend"};
BasicSetting<bool> renderer_debug{false, "debug"};
BasicSetting<bool> renderer_shader_feedback{false, "shader_feedback"};
BasicSetting<bool> enable_nsight_aftermath{false, "nsight_aftermath"};
@@ -601,7 +601,7 @@ struct Values {
// Debugging
bool record_frame_times;
BasicSetting<bool> use_gdbstub{false, "use_gdbstub"};
BasicSetting<u16> gdbstub_port{0, "gdbstub_port"};
BasicSetting<u16> gdbstub_port{6543, "gdbstub_port"};
BasicSetting<std::string> program_args{std::string(), "program_args"};
BasicSetting<bool> dump_exefs{false, "dump_exefs"};
BasicSetting<bool> dump_nso{false, "dump_nso"};

View File

@@ -36,6 +36,13 @@ add_library(core STATIC
crypto/ctr_encryption_layer.h
crypto/xts_encryption_layer.cpp
crypto/xts_encryption_layer.h
debugger/debugger_interface.h
debugger/debugger.cpp
debugger/debugger.h
debugger/gdbstub_arch.cpp
debugger/gdbstub_arch.h
debugger/gdbstub.cpp
debugger/gdbstub.h
device_memory.cpp
device_memory.h
file_sys/bis_factory.cpp
@@ -736,16 +743,11 @@ if (MSVC)
/we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
/we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4456 # Declaration of 'identifier' hides previous local declaration
/we4457 # Declaration of 'identifier' hides function parameter
/we4458 # Declaration of 'identifier' hides class member
/we4459 # Declaration of 'identifier' hides global declaration
)
else()
target_compile_options(core PRIVATE
-Werror=conversion
-Werror=ignored-qualifiers
-Werror=shadow
$<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
@@ -761,6 +763,9 @@ create_target_directory_groups(core)
target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls Opus::Opus)
if (MINGW)
target_link_libraries(core PRIVATE ${MSWSOCK_LIBRARY})
endif()
if (ENABLE_WEB_SERVICE)
target_compile_definitions(core PRIVATE -DENABLE_WEB_SERVICE)

View File

@@ -9,7 +9,9 @@
#include "core/arm/arm_interface.h"
#include "core/arm/symbols.h"
#include "core/core.h"
#include "core/debugger/debugger.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/svc.h"
#include "core/loader/loader.h"
#include "core/memory.h"
@@ -88,4 +90,89 @@ void ARM_Interface::LogBacktrace() const {
}
}
void ARM_Interface::Run() {
using Kernel::StepState;
using Kernel::SuspendType;
while (true) {
Kernel::KThread* current_thread{Kernel::GetCurrentThreadPointer(system.Kernel())};
Dynarmic::HaltReason hr{};
// Notify the debugger and go to sleep if a step was performed
// and this thread has been scheduled again.
if (current_thread->GetStepState() == StepState::StepPerformed) {
system.GetDebugger().NotifyThreadStopped(current_thread);
current_thread->RequestSuspend(SuspendType::Debug);
break;
}
// Otherwise, run the thread.
system.EnterDynarmicProfile();
if (current_thread->GetStepState() == StepState::StepPending) {
hr = StepJit();
if (Has(hr, step_thread)) {
current_thread->SetStepState(StepState::StepPerformed);
}
} else {
hr = RunJit();
}
system.ExitDynarmicProfile();
// Notify the debugger and go to sleep if a breakpoint was hit.
if (Has(hr, breakpoint)) {
RewindBreakpointInstruction();
system.GetDebugger().NotifyThreadStopped(current_thread);
current_thread->RequestSuspend(SuspendType::Debug);
break;
}
if (Has(hr, watchpoint)) {
RewindBreakpointInstruction();
system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint());
current_thread->RequestSuspend(SuspendType::Debug);
break;
}
// Handle syscalls and scheduling (this may change the current thread)
if (Has(hr, svc_call)) {
Kernel::Svc::Call(system, GetSvcNumber());
}
if (Has(hr, break_loop) || !uses_wall_clock) {
break;
}
}
}
void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) {
watchpoints = &wp;
}
const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const {
if (!watchpoints) {
return nullptr;
}
const VAddr start_address{addr};
const VAddr end_address{addr + size};
for (size_t i = 0; i < Core::Hardware::NUM_WATCHPOINTS; i++) {
const auto& watch{(*watchpoints)[i]};
if (end_address <= watch.start_address) {
continue;
}
if (start_address >= watch.end_address) {
continue;
}
if ((access_type & watch.type) == Kernel::DebugWatchpointType::None) {
continue;
}
return &watch;
}
return nullptr;
}
} // namespace Core
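// Worked example (illustrative): MatchingWatchpoint treats the access and each
// watchpoint as half-open ranges [start, end). An 8-byte access at 0x1000
// covers [0x1000, 0x1008), so it matches a watchpoint over [0x1004, 0x1006)
// but not one over [0x1008, 0x1010): the end_address <= watch.start_address
// check rejects ranges that only touch.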

View File

@@ -5,7 +5,11 @@
#pragma once
#include <array>
#include <span>
#include <vector>
#include <dynarmic/interface/halt_reason.h>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hardware_properties.h"
@@ -16,13 +20,16 @@ struct PageTable;
namespace Kernel {
enum class VMAPermission : u8;
}
enum class DebugWatchpointType : u8;
struct DebugWatchpoint;
} // namespace Kernel
namespace Core {
class System;
class CPUInterruptHandler;
using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>;
using WatchpointArray = std::array<Kernel::DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>;
/// Generic ARMv8 CPU interface
class ARM_Interface {
@@ -64,10 +71,7 @@ public:
static_assert(sizeof(ThreadContext64) == 0x320);
/// Runs the CPU until an event happens
virtual void Run() = 0;
/// Step CPU by one instruction
virtual void Step() = 0;
void Run();
/// Clear all instruction cache
virtual void ClearInstructionCache() = 0;
@@ -170,6 +174,7 @@ public:
virtual void SaveContext(ThreadContext64& ctx) = 0;
virtual void LoadContext(const ThreadContext32& ctx) = 0;
virtual void LoadContext(const ThreadContext64& ctx) = 0;
void LoadWatchpointArray(const WatchpointArray& wp);
/// Clears the exclusive monitor's state.
virtual void ClearExclusiveState() = 0;
@@ -194,13 +199,28 @@ public:
void LogBacktrace() const;
static constexpr Dynarmic::HaltReason step_thread = Dynarmic::HaltReason::Step;
static constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
static constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
static constexpr Dynarmic::HaltReason breakpoint = Dynarmic::HaltReason::UserDefined4;
static constexpr Dynarmic::HaltReason watchpoint = Dynarmic::HaltReason::UserDefined5;
protected:
/// System context that this ARM interface is running under.
System& system;
CPUInterrupts& interrupt_handlers;
const WatchpointArray* watchpoints;
bool uses_wall_clock;
static void SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out);
const Kernel::DebugWatchpoint* MatchingWatchpoint(
VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const;
virtual Dynarmic::HaltReason RunJit() = 0;
virtual Dynarmic::HaltReason StepJit() = 0;
virtual u32 GetSvcNumber() const = 0;
virtual const Kernel::DebugWatchpoint* HaltedWatchpoint() const = 0;
virtual void RewindBreakpointInstruction() = 0;
};
} // namespace Core

View File

@@ -17,6 +17,8 @@
#include "core/arm/dynarmic/arm_exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/debugger/debugger.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/svc.h"
#include "core/memory.h"
@@ -24,51 +26,65 @@ namespace Core {
using namespace Common::Literals;
constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
public:
explicit DynarmicCallbacks32(ARM_Dynarmic_32& parent_)
: parent{parent_}, memory(parent.system.Memory()) {}
: parent{parent_},
memory(parent.system.Memory()), debugger_enabled{parent.system.DebuggerEnabled()} {}
u8 MemoryRead8(u32 vaddr) override {
CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
return memory.Read8(vaddr);
}
u16 MemoryRead16(u32 vaddr) override {
CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
return memory.Read16(vaddr);
}
u32 MemoryRead32(u32 vaddr) override {
CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
return memory.Read32(vaddr);
}
u64 MemoryRead64(u32 vaddr) override {
CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
return memory.Read64(vaddr);
}
void MemoryWrite8(u32 vaddr, u8 value) override {
memory.Write8(vaddr, value);
if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
memory.Write8(vaddr, value);
}
}
void MemoryWrite16(u32 vaddr, u16 value) override {
memory.Write16(vaddr, value);
if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
memory.Write16(vaddr, value);
}
}
void MemoryWrite32(u32 vaddr, u32 value) override {
memory.Write32(vaddr, value);
if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
memory.Write32(vaddr, value);
}
}
void MemoryWrite64(u32 vaddr, u64 value) override {
memory.Write64(vaddr, value);
if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
memory.Write64(vaddr, value);
}
}
bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override {
return memory.WriteExclusive8(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive8(vaddr, value, expected);
}
bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override {
return memory.WriteExclusive16(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive16(vaddr, value, expected);
}
bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override {
return memory.WriteExclusive32(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive32(vaddr, value, expected);
}
bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override {
return memory.WriteExclusive64(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive64(vaddr, value, expected);
}
void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
@@ -78,16 +94,21 @@ public:
}
void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
if (debugger_enabled) {
parent.SaveContext(parent.breakpoint_context);
parent.jit.load()->HaltExecution(ARM_Interface::breakpoint);
return;
}
parent.LogBacktrace();
LOG_CRITICAL(Core_ARM,
"ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
exception, pc, MemoryReadCode(pc), parent.IsInThumbMode());
UNIMPLEMENTED();
}
void CallSVC(u32 swi) override {
parent.svc_swi = swi;
parent.jit.load()->HaltExecution(svc_call);
parent.jit.load()->HaltExecution(ARM_Interface::svc_call);
}
void AddTicks(u64 ticks) override {
@@ -113,9 +134,26 @@ public:
return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
}
bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
if (!debugger_enabled) {
return true;
}
const auto match{parent.MatchingWatchpoint(addr, size, type)};
if (match) {
parent.SaveContext(parent.breakpoint_context);
parent.jit.load()->HaltExecution(ARM_Interface::watchpoint);
parent.halted_watchpoint = match;
return false;
}
return true;
}
ARM_Dynarmic_32& parent;
Core::Memory::Memory& memory;
std::size_t num_interpreted_instructions{};
bool debugger_enabled{};
static constexpr u64 minimum_run_cycles = 1000U;
};
@@ -150,6 +188,11 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
config.code_cache_size = 512_MiB;
config.far_code_offset = 400_MiB;
// Allow memory fault handling to work
if (system.DebuggerEnabled()) {
config.check_halt_on_memory_access = true;
}
// null_jit
if (!page_table) {
// Don't waste too much memory on null_jit
@@ -232,20 +275,24 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
return std::make_unique<Dynarmic::A32::Jit>(config);
}
void ARM_Dynarmic_32::Run() {
while (true) {
const auto hr = jit.load()->Run();
if (Has(hr, svc_call)) {
Kernel::Svc::Call(system, svc_swi);
}
if (Has(hr, break_loop) || !uses_wall_clock) {
break;
}
}
Dynarmic::HaltReason ARM_Dynarmic_32::RunJit() {
return jit.load()->Run();
}
void ARM_Dynarmic_32::Step() {
jit.load()->Step();
Dynarmic::HaltReason ARM_Dynarmic_32::StepJit() {
return jit.load()->Step();
}
u32 ARM_Dynarmic_32::GetSvcNumber() const {
return svc_swi;
}
const Kernel::DebugWatchpoint* ARM_Dynarmic_32::HaltedWatchpoint() const {
return halted_watchpoint;
}
void ARM_Dynarmic_32::RewindBreakpointInstruction() {
LoadContext(breakpoint_context);
}
ARM_Dynarmic_32::ARM_Dynarmic_32(System& system_, CPUInterrupts& interrupt_handlers_,

View File

@@ -41,8 +41,6 @@ public:
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
void Run() override;
void Step() override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
void SetTPIDR_EL0(u64 value) override;
@@ -70,6 +68,13 @@ public:
std::vector<BacktraceEntry> GetBacktrace() const override;
protected:
Dynarmic::HaltReason RunJit() override;
Dynarmic::HaltReason StepJit() override;
u32 GetSvcNumber() const override;
const Kernel::DebugWatchpoint* HaltedWatchpoint() const override;
void RewindBreakpointInstruction() override;
private:
std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable* page_table) const;
@@ -95,6 +100,10 @@ private:
// SVC callback
u32 svc_swi{};
// Watchpoint info
const Kernel::DebugWatchpoint* halted_watchpoint;
ThreadContext32 breakpoint_context;
};
} // namespace Core

View File

@@ -15,6 +15,7 @@
#include "core/arm/dynarmic/arm_exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/debugger/debugger.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/svc.h"
@@ -25,61 +26,79 @@ namespace Core {
using Vector = Dynarmic::A64::Vector;
using namespace Common::Literals;
constexpr Dynarmic::HaltReason break_loop = Dynarmic::HaltReason::UserDefined2;
constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
public:
explicit DynarmicCallbacks64(ARM_Dynarmic_64& parent_)
: parent{parent_}, memory(parent.system.Memory()) {}
: parent{parent_},
memory(parent.system.Memory()), debugger_enabled{parent.system.DebuggerEnabled()} {}
u8 MemoryRead8(u64 vaddr) override {
CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
return memory.Read8(vaddr);
}
u16 MemoryRead16(u64 vaddr) override {
CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
return memory.Read16(vaddr);
}
u32 MemoryRead32(u64 vaddr) override {
CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
return memory.Read32(vaddr);
}
u64 MemoryRead64(u64 vaddr) override {
CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
return memory.Read64(vaddr);
}
Vector MemoryRead128(u64 vaddr) override {
CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read);
return {memory.Read64(vaddr), memory.Read64(vaddr + 8)};
}
void MemoryWrite8(u64 vaddr, u8 value) override {
memory.Write8(vaddr, value);
if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
memory.Write8(vaddr, value);
}
}
void MemoryWrite16(u64 vaddr, u16 value) override {
memory.Write16(vaddr, value);
if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
memory.Write16(vaddr, value);
}
}
void MemoryWrite32(u64 vaddr, u32 value) override {
memory.Write32(vaddr, value);
if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
memory.Write32(vaddr, value);
}
}
void MemoryWrite64(u64 vaddr, u64 value) override {
memory.Write64(vaddr, value);
if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
memory.Write64(vaddr, value);
}
}
void MemoryWrite128(u64 vaddr, Vector value) override {
memory.Write64(vaddr, value[0]);
memory.Write64(vaddr + 8, value[1]);
if (CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write)) {
memory.Write64(vaddr, value[0]);
memory.Write64(vaddr + 8, value[1]);
}
}
bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override {
return memory.WriteExclusive8(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive8(vaddr, value, expected);
}
bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override {
return memory.WriteExclusive16(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive16(vaddr, value, expected);
}
bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override {
return memory.WriteExclusive32(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive32(vaddr, value, expected);
}
bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override {
return memory.WriteExclusive64(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive64(vaddr, value, expected);
}
bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override {
return memory.WriteExclusive128(vaddr, value, expected);
return CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write) &&
memory.WriteExclusive128(vaddr, value, expected);
}
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
@@ -119,8 +138,13 @@ public:
case Dynarmic::A64::Exception::SendEventLocal:
case Dynarmic::A64::Exception::Yield:
return;
case Dynarmic::A64::Exception::Breakpoint:
default:
if (debugger_enabled) {
parent.SaveContext(parent.breakpoint_context);
parent.jit.load()->HaltExecution(ARM_Interface::breakpoint);
return;
}
parent.LogBacktrace();
ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
@@ -129,7 +153,7 @@ public:
void CallSVC(u32 swi) override {
parent.svc_swi = swi;
parent.jit.load()->HaltExecution(svc_call);
parent.jit.load()->HaltExecution(ARM_Interface::svc_call);
}
void AddTicks(u64 ticks) override {
@@ -157,10 +181,27 @@ public:
return parent.system.CoreTiming().GetClockTicks();
}
bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
if (!debugger_enabled) {
return true;
}
const auto match{parent.MatchingWatchpoint(addr, size, type)};
if (match) {
parent.SaveContext(parent.breakpoint_context);
parent.jit.load()->HaltExecution(ARM_Interface::watchpoint);
parent.halted_watchpoint = match;
return false;
}
return true;
}
ARM_Dynarmic_64& parent;
Core::Memory::Memory& memory;
u64 tpidrro_el0 = 0;
u64 tpidr_el0 = 0;
bool debugger_enabled{};
static constexpr u64 minimum_run_cycles = 1000U;
};
@@ -211,6 +252,11 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
config.code_cache_size = 512_MiB;
config.far_code_offset = 400_MiB;
// Allow memory fault handling to work
if (system.DebuggerEnabled()) {
config.check_halt_on_memory_access = true;
}
// null_jit
if (!page_table) {
// Don't waste too much memory on null_jit
@@ -293,20 +339,24 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
return std::make_shared<Dynarmic::A64::Jit>(config);
}
void ARM_Dynarmic_64::Run() {
while (true) {
const auto hr = jit.load()->Run();
if (Has(hr, svc_call)) {
Kernel::Svc::Call(system, svc_swi);
}
if (Has(hr, break_loop) || !uses_wall_clock) {
break;
}
}
Dynarmic::HaltReason ARM_Dynarmic_64::RunJit() {
return jit.load()->Run();
}
void ARM_Dynarmic_64::Step() {
jit.load()->Step();
Dynarmic::HaltReason ARM_Dynarmic_64::StepJit() {
return jit.load()->Step();
}
u32 ARM_Dynarmic_64::GetSvcNumber() const {
return svc_swi;
}
const Kernel::DebugWatchpoint* ARM_Dynarmic_64::HaltedWatchpoint() const {
return halted_watchpoint;
}
void ARM_Dynarmic_64::RewindBreakpointInstruction() {
LoadContext(breakpoint_context);
}
ARM_Dynarmic_64::ARM_Dynarmic_64(System& system_, CPUInterrupts& interrupt_handlers_,

View File

@@ -39,8 +39,6 @@ public:
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
void Run() override;
void Step() override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
void SetTPIDR_EL0(u64 value) override;
@@ -64,6 +62,13 @@ public:
std::vector<BacktraceEntry> GetBacktrace() const override;
protected:
Dynarmic::HaltReason RunJit() override;
Dynarmic::HaltReason StepJit() override;
u32 GetSvcNumber() const override;
const Kernel::DebugWatchpoint* HaltedWatchpoint() const override;
void RewindBreakpointInstruction() override;
private:
std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable* page_table,
std::size_t address_space_bits) const;
@@ -88,6 +93,10 @@ private:
// SVC callback
u32 svc_swi{};
// Breakpoint info
const Kernel::DebugWatchpoint* halted_watchpoint;
ThreadContext64 breakpoint_context;
};
} // namespace Core

View File

@@ -3,73 +3,14 @@
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/elf.h"
#include "core/arm/symbols.h"
#include "core/core.h"
#include "core/memory.h"
using namespace Common::ELF;
namespace Core {
namespace {
constexpr u64 ELF_DYNAMIC_TAG_NULL = 0;
constexpr u64 ELF_DYNAMIC_TAG_STRTAB = 5;
constexpr u64 ELF_DYNAMIC_TAG_SYMTAB = 6;
constexpr u64 ELF_DYNAMIC_TAG_SYMENT = 11;
enum class ELFSymbolType : u8 {
None = 0,
Object = 1,
Function = 2,
Section = 3,
File = 4,
Common = 5,
TLS = 6,
};
enum class ELFSymbolBinding : u8 {
Local = 0,
Global = 1,
Weak = 2,
};
enum class ELFSymbolVisibility : u8 {
Default = 0,
Internal = 1,
Hidden = 2,
Protected = 3,
};
struct ELF64Symbol {
u32 name_index;
union {
u8 info;
BitField<0, 4, ELFSymbolType> type;
BitField<4, 4, ELFSymbolBinding> binding;
};
ELFSymbolVisibility visibility;
u16 sh_index;
u64 value;
u64 size;
};
static_assert(sizeof(ELF64Symbol) == 0x18, "ELF64Symbol has incorrect size.");
struct ELF32Symbol {
u32 name_index;
u32 value;
u32 size;
union {
u8 info;
BitField<0, 4, ELFSymbolType> type;
BitField<4, 4, ELFSymbolBinding> binding;
};
ELFSymbolVisibility visibility;
u16 sh_index;
};
static_assert(sizeof(ELF32Symbol) == 0x10, "ELF32Symbol has incorrect size.");
} // Anonymous namespace
namespace Symbols {
template <typename Word, typename ELFSymbol, typename ByteReader>
@@ -110,15 +51,15 @@ static Symbols GetSymbols(ByteReader ReadBytes) {
const Word value = ReadWord(dynamic_index + sizeof(Word));
dynamic_index += 2 * sizeof(Word);
if (tag == ELF_DYNAMIC_TAG_NULL) {
if (tag == ElfDtNull) {
break;
}
if (tag == ELF_DYNAMIC_TAG_STRTAB) {
if (tag == ElfDtStrtab) {
string_table_offset = value;
} else if (tag == ELF_DYNAMIC_TAG_SYMTAB) {
} else if (tag == ElfDtSymtab) {
symbol_table_offset = value;
} else if (tag == ELF_DYNAMIC_TAG_SYMENT) {
} else if (tag == ElfDtSyment) {
symbol_entry_size = value;
}
}
@@ -134,14 +75,14 @@ static Symbols GetSymbols(ByteReader ReadBytes) {
ELFSymbol symbol{};
ReadBytes(&symbol, symbol_index, sizeof(ELFSymbol));
VAddr string_offset = string_table_offset + symbol.name_index;
VAddr string_offset = string_table_offset + symbol.st_name;
std::string name;
for (u8 c = Read8(string_offset); c != 0; c = Read8(++string_offset)) {
name += static_cast<char>(c);
}
symbol_index += symbol_entry_size;
out[name] = std::make_pair(symbol.value, symbol.size);
out[name] = std::make_pair(symbol.st_value, symbol.st_size);
}
return out;
@@ -152,9 +93,9 @@ Symbols GetSymbols(VAddr base, Core::Memory::Memory& memory, bool is_64) {
[&](void* ptr, size_t offset, size_t size) { memory.ReadBlock(base + offset, ptr, size); }};
if (is_64) {
return GetSymbols<u64, ELF64Symbol>(ReadBytes);
return GetSymbols<u64, Elf64_Sym>(ReadBytes);
} else {
return GetSymbols<u32, ELF32Symbol>(ReadBytes);
return GetSymbols<u32, Elf32_Sym>(ReadBytes);
}
}
@@ -164,9 +105,9 @@ Symbols GetSymbols(std::span<const u8> data, bool is_64) {
}};
if (is_64) {
return GetSymbols<u64, ELF64Symbol>(ReadBytes);
return GetSymbols<u64, Elf64_Sym>(ReadBytes);
} else {
return GetSymbols<u32, ELF32Symbol>(ReadBytes);
return GetSymbols<u32, Elf32_Sym>(ReadBytes);
}
}

View File

@@ -17,6 +17,7 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/debugger/debugger.h"
#include "core/device_memory.h"
#include "core/file_sys/bis_factory.h"
#include "core/file_sys/mode.h"
@@ -137,7 +138,6 @@ struct System::Impl {
kernel.Suspend(false);
core_timing.SyncPause(false);
cpu_manager.Pause(false);
is_paused = false;
return status;
@@ -149,28 +149,29 @@ struct System::Impl {
core_timing.SyncPause(true);
kernel.Suspend(true);
cpu_manager.Pause(true);
is_paused = true;
return status;
}
std::unique_lock<std::mutex> StallCPU() {
std::unique_lock<std::mutex> StallProcesses() {
std::unique_lock<std::mutex> lk(suspend_guard);
kernel.Suspend(true);
core_timing.SyncPause(true);
cpu_manager.Pause(true);
return lk;
}
void UnstallCPU() {
void UnstallProcesses() {
if (!is_paused) {
core_timing.SyncPause(false);
kernel.Suspend(false);
cpu_manager.Pause(false);
}
}
void InitializeDebugger(System& system, u16 port) {
debugger = std::make_unique<Debugger>(system, port);
}
SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
LOG_DEBUG(Core, "initialized OK");
@@ -329,6 +330,9 @@ struct System::Impl {
gpu_core->NotifyShutdown();
}
kernel.ShutdownCores();
cpu_manager.Shutdown();
debugger.reset();
services.reset();
service_manager.reset();
cheat_engine.reset();
@@ -436,6 +440,9 @@ struct System::Impl {
/// Network instance
Network::NetworkInstance network_instance;
/// Debugger
std::unique_ptr<Core::Debugger> debugger;
SystemResultStatus status = SystemResultStatus::Success;
std::string status_details = "";
@@ -472,10 +479,6 @@ SystemResultStatus System::Pause() {
return impl->Pause();
}
SystemResultStatus System::SingleStep() {
return SystemResultStatus::Success;
}
void System::InvalidateCpuInstructionCaches() {
impl->kernel.InvalidateAllInstructionCaches();
}
@@ -488,12 +491,22 @@ void System::Shutdown() {
impl->Shutdown();
}
std::unique_lock<std::mutex> System::StallCPU() {
return impl->StallCPU();
void System::DetachDebugger() {
if (impl->debugger) {
impl->debugger->NotifyShutdown();
}
}
void System::UnstallCPU() {
impl->UnstallCPU();
std::unique_lock<std::mutex> System::StallProcesses() {
return impl->StallProcesses();
}
void System::UnstallProcesses() {
impl->UnstallProcesses();
}
void System::InitializeDebugger() {
impl->InitializeDebugger(*this, Settings::values.gdbstub_port.GetValue());
}
SystemResultStatus System::Load(Frontend::EmuWindow& emu_window, const std::string& filepath,
@@ -809,6 +822,18 @@ bool System::IsMulticore() const {
return impl->is_multicore;
}
bool System::DebuggerEnabled() const {
return Settings::values.use_gdbstub.GetValue();
}
Core::Debugger& System::GetDebugger() {
return *impl->debugger;
}
const Core::Debugger& System::GetDebugger() const {
return *impl->debugger;
}
void System::RegisterExecuteProgramCallback(ExecuteProgramCallback&& callback) {
impl->execute_program_callback = std::move(callback);
}

View File

@@ -97,6 +97,7 @@ namespace Core {
class ARM_Interface;
class CpuManager;
class Debugger;
class DeviceMemory;
class ExclusiveMonitor;
class SpeedLimiter;
@@ -147,12 +148,6 @@ public:
*/
[[nodiscard]] SystemResultStatus Pause();
/**
* Step the CPU one instruction
* @return Result status, indicating whether or not the operation succeeded.
*/
[[nodiscard]] SystemResultStatus SingleStep();
/**
* Invalidate the CPU instruction caches
* This function should only be used by GDB Stub to support breakpoints, memory updates and
@@ -165,8 +160,16 @@ public:
/// Shutdown the emulated system.
void Shutdown();
std::unique_lock<std::mutex> StallCPU();
void UnstallCPU();
/// Forcibly detach the debugger if it is running.
void DetachDebugger();
std::unique_lock<std::mutex> StallProcesses();
void UnstallProcesses();
/**
* Initialize the debugger.
*/
void InitializeDebugger();
/**
* Load an executable application.
@@ -354,6 +357,9 @@ public:
[[nodiscard]] Service::Time::TimeManager& GetTimeManager();
[[nodiscard]] const Service::Time::TimeManager& GetTimeManager() const;
[[nodiscard]] Core::Debugger& GetDebugger();
[[nodiscard]] const Core::Debugger& GetDebugger() const;
void SetExitLock(bool locked);
[[nodiscard]] bool GetExitLock() const;
@@ -375,6 +381,9 @@ public:
/// Tells if system is running on multicore.
[[nodiscard]] bool IsMulticore() const;
/// Tells if the system debugger is enabled.
[[nodiscard]] bool DebuggerEnabled() const;
/// Type used for the frontend to designate a callback for System to re-launch the application
/// using a specified program index.
using ExecuteProgramCallback = std::function<void(std::size_t)>;

View File

@@ -21,23 +21,24 @@ CpuManager::~CpuManager() = default;
void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
std::size_t core) {
cpu_manager.RunThread(stop_token, core);
cpu_manager.RunThread(core);
}
void CpuManager::Initialize() {
running_mode = true;
if (is_multicore) {
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
}
} else {
core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
num_cores = is_multicore ? Core::Hardware::NUM_CPU_CORES : 1;
gpu_barrier = std::make_unique<Common::Barrier>(num_cores + 1);
for (std::size_t core = 0; core < num_cores; core++) {
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
}
}
void CpuManager::Shutdown() {
running_mode = false;
Pause(false);
for (std::size_t core = 0; core < num_cores; core++) {
if (core_data[core].host_thread.joinable()) {
core_data[core].host_thread.join();
}
}
}
std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
@@ -48,8 +49,8 @@ std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
return IdleThreadFunction;
}
std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
return SuspendThreadFunction;
std::function<void(void*)> CpuManager::GetShutdownThreadStartFunc() {
return ShutdownThreadFunction;
}
void CpuManager::GuestThreadFunction(void* cpu_manager_) {
@@ -79,17 +80,12 @@ void CpuManager::IdleThreadFunction(void* cpu_manager_) {
}
}
void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
if (cpu_manager->is_multicore) {
cpu_manager->MultiCoreRunSuspendThread();
} else {
cpu_manager->SingleCoreRunSuspendThread();
}
void CpuManager::ShutdownThreadFunction(void* cpu_manager) {
static_cast<CpuManager*>(cpu_manager)->ShutdownThread();
}
void* CpuManager::GetStartFuncParamater() {
return static_cast<void*>(this);
void* CpuManager::GetStartFuncParameter() {
return this;
}
///////////////////////////////////////////////////////////////////////////////
@@ -99,7 +95,7 @@ void* CpuManager::GetStartFuncParamater() {
void CpuManager::MultiCoreRunGuestThread() {
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
auto& host_context = thread->GetHostContext();
host_context->SetRewindPoint(GuestRewindFunction, this);
MultiCoreRunGuestLoop();
@@ -110,12 +106,10 @@ void CpuManager::MultiCoreRunGuestLoop() {
while (true) {
auto* physical_core = &kernel.CurrentPhysicalCore();
system.EnterDynarmicProfile();
while (!physical_core->IsInterrupted()) {
physical_core->Run();
physical_core = &kernel.CurrentPhysicalCore();
}
system.ExitDynarmicProfile();
{
Kernel::KScopedDisableDispatch dd(kernel);
physical_core->ArmInterface().ClearExclusiveState();
@@ -131,58 +125,6 @@ void CpuManager::MultiCoreRunIdleThread() {
}
}
void CpuManager::MultiCoreRunSuspendThread() {
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
while (true) {
auto core = kernel.CurrentPhysicalCoreIndex();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.CurrentPhysicalCoreIndex());
scheduler.RescheduleCurrentCore();
}
}
void CpuManager::MultiCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = true;
for (const auto& data : core_data) {
all_not_barrier &= !data.is_running.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.enter_barrier->Set();
}
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.exit_barrier->Set();
}
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
/// Don't release the barrier
}
paused_state = paused;
}
///////////////////////////////////////////////////////////////////////////////
/// SingleCore ///
///////////////////////////////////////////////////////////////////////////////
@@ -190,7 +132,7 @@ void CpuManager::MultiCorePause(bool paused) {
void CpuManager::SingleCoreRunGuestThread() {
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
auto& host_context = thread->GetHostContext();
host_context->SetRewindPoint(GuestRewindFunction, this);
SingleCoreRunGuestLoop();
@@ -200,12 +142,10 @@ void CpuManager::SingleCoreRunGuestLoop() {
auto& kernel = system.Kernel();
while (true) {
auto* physical_core = &kernel.CurrentPhysicalCore();
system.EnterDynarmicProfile();
if (!physical_core->IsInterrupted()) {
physical_core->Run();
physical_core = &kernel.CurrentPhysicalCore();
}
system.ExitDynarmicProfile();
kernel.SetIsPhantomModeForSingleCore(true);
system.CoreTiming().Advance();
kernel.SetIsPhantomModeForSingleCore(false);
@@ -228,25 +168,11 @@ void CpuManager::SingleCoreRunIdleThread() {
}
}
void CpuManager::SingleCoreRunSuspendThread() {
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
while (true) {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
scheduler.RescheduleCurrentCore();
}
}
void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
{
auto& kernel = system.Kernel();
auto& scheduler = kernel.Scheduler(current_core);
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread();
if (idle_count >= 4 || from_running_enviroment) {
if (!from_running_enviroment) {
system.CoreTiming().Idle();
@@ -258,7 +184,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
}
current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
system.CoreTiming().ResetTicks();
scheduler.Unload(scheduler.GetCurrentThread());
scheduler.Unload(scheduler.GetSchedulerCurrentThread());
auto& next_scheduler = kernel.Scheduler(current_core);
Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext());
@@ -267,47 +193,21 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
// May have changed scheduler
{
auto& scheduler = system.Kernel().Scheduler(current_core);
scheduler.Reload(scheduler.GetCurrentThread());
if (!scheduler.IsIdle()) {
idle_count = 0;
}
scheduler.Reload(scheduler.GetSchedulerCurrentThread());
idle_count = 0;
}
}
void CpuManager::SingleCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
}
core_data[0].enter_barrier->Set();
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
core_data[0].exit_barrier->Set();
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
/// Don't release the barrier
}
paused_state = paused;
void CpuManager::ShutdownThread() {
auto& kernel = system.Kernel();
auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0;
auto* current_thread = kernel.GetCurrentEmuThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
UNREACHABLE();
}
void CpuManager::Pause(bool paused) {
if (is_multicore) {
MultiCorePause(paused);
} else {
SingleCorePause(paused);
}
}
void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
void CpuManager::RunThread(std::size_t core) {
/// Initialization
system.RegisterCoreThread(core);
std::string name;
@@ -320,45 +220,24 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
Common::SetCurrentThreadName(name.c_str());
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
auto& data = core_data[core];
data.enter_barrier = std::make_unique<Common::Event>();
data.exit_barrier = std::make_unique<Common::Event>();
data.host_context = Common::Fiber::ThreadToFiber();
data.is_running = false;
data.initialized = true;
const bool sc_sync = !is_async_gpu && !is_multicore;
bool sc_sync_first_use = sc_sync;
// Cleanup
SCOPE_EXIT({
data.host_context->Exit();
data.enter_barrier.reset();
data.exit_barrier.reset();
data.initialized = false;
MicroProfileOnThreadExit();
});
/// Running
while (running_mode) {
data.is_running = false;
data.enter_barrier->Wait();
if (sc_sync_first_use) {
system.GPU().ObtainContext();
sc_sync_first_use = false;
}
// Running
gpu_barrier->Sync();
// Emulation was stopped
if (stop_token.stop_requested()) {
return;
}
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
data.is_running = true;
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
data.is_running = false;
data.is_paused = true;
data.exit_barrier->Wait();
data.is_paused = false;
if (!is_async_gpu && !is_multicore) {
system.GPU().ObtainContext();
}
auto* current_thread = system.Kernel().CurrentScheduler()->GetIdleThread();
Kernel::SetCurrentThread(system.Kernel(), current_thread);
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
}
} // namespace Core

View File

@@ -43,15 +43,17 @@ public:
is_async_gpu = is_async;
}
void OnGpuReady() {
gpu_barrier->Sync();
}
void Initialize();
void Shutdown();
void Pause(bool paused);
static std::function<void(void*)> GetGuestThreadStartFunc();
static std::function<void(void*)> GetIdleThreadStartFunc();
static std::function<void(void*)> GetSuspendThreadStartFunc();
void* GetStartFuncParamater();
static std::function<void(void*)> GetShutdownThreadStartFunc();
void* GetStartFuncParameter();
void PreemptSingleCore(bool from_running_enviroment = true);
@@ -63,43 +65,34 @@ private:
static void GuestThreadFunction(void* cpu_manager);
static void GuestRewindFunction(void* cpu_manager);
static void IdleThreadFunction(void* cpu_manager);
static void SuspendThreadFunction(void* cpu_manager);
static void ShutdownThreadFunction(void* cpu_manager);
void MultiCoreRunGuestThread();
void MultiCoreRunGuestLoop();
void MultiCoreRunIdleThread();
void MultiCoreRunSuspendThread();
void MultiCorePause(bool paused);
void SingleCoreRunGuestThread();
void SingleCoreRunGuestLoop();
void SingleCoreRunIdleThread();
void SingleCoreRunSuspendThread();
void SingleCorePause(bool paused);
static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
void RunThread(std::stop_token stop_token, std::size_t core);
void ShutdownThread();
void RunThread(std::size_t core);
struct CoreData {
std::shared_ptr<Common::Fiber> host_context;
std::unique_ptr<Common::Event> enter_barrier;
std::unique_ptr<Common::Event> exit_barrier;
std::atomic<bool> is_running;
std::atomic<bool> is_paused;
std::atomic<bool> initialized;
std::jthread host_thread;
};
std::atomic<bool> running_mode{};
std::atomic<bool> paused_state{};
std::unique_ptr<Common::Barrier> gpu_barrier{};
std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};
bool is_async_gpu{};
bool is_multicore{};
std::atomic<std::size_t> current_core{};
std::size_t idle_count{};
std::size_t num_cores{};
static constexpr std::size_t max_cycle_runs = 5;
System& system;

View File

@@ -140,7 +140,6 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
return 0x3C;
}
UNREACHABLE();
return 0;
}
u64 GetSignatureTypePaddingSize(SignatureType type) {
@@ -155,7 +154,6 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
return 0x40;
}
UNREACHABLE();
return 0;
}
SignatureType Ticket::GetSignatureType() const {

View File

@@ -0,0 +1,315 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <mutex>
#include <thread>
#include <boost/asio.hpp>
#include <boost/process/async_pipe.hpp>
#include "common/logging/log.h"
#include "common/thread.h"
#include "core/core.h"
#include "core/debugger/debugger.h"
#include "core/debugger/debugger_interface.h"
#include "core/debugger/gdbstub.h"
#include "core/hle/kernel/global_scheduler_context.h"
template <typename Readable, typename Buffer, typename Callback>
static void AsyncReceiveInto(Readable& r, Buffer& buffer, Callback&& c) {
static_assert(std::is_trivial_v<Buffer>);
auto boost_buffer{boost::asio::buffer(&buffer, sizeof(Buffer))};
r.async_read_some(
boost_buffer, [&, c](const boost::system::error_code& error, size_t bytes_read) {
if (!error.failed()) {
const u8* buffer_start = reinterpret_cast<const u8*>(&buffer);
std::span<const u8> received_data{buffer_start, buffer_start + bytes_read};
c(received_data);
}
AsyncReceiveInto(r, buffer, c);
});
}
template <typename Readable, typename Buffer>
static std::span<const u8> ReceiveInto(Readable& r, Buffer& buffer) {
static_assert(std::is_trivial_v<Buffer>);
auto boost_buffer{boost::asio::buffer(&buffer, sizeof(Buffer))};
size_t bytes_read = r.read_some(boost_buffer);
const u8* buffer_start = reinterpret_cast<const u8*>(&buffer);
std::span<const u8> received_data{buffer_start, buffer_start + bytes_read};
return received_data;
}
enum class SignalType {
Stopped,
Watchpoint,
ShuttingDown,
};
struct SignalInfo {
SignalType type;
Kernel::KThread* thread;
const Kernel::DebugWatchpoint* watchpoint;
};
namespace Core {
class DebuggerImpl : public DebuggerBackend {
public:
explicit DebuggerImpl(Core::System& system_, u16 port)
: system{system_}, signal_pipe{io_context}, client_socket{io_context} {
frontend = std::make_unique<GDBStub>(*this, system);
InitializeServer(port);
}
~DebuggerImpl() override {
ShutdownServer();
}
bool SignalDebugger(SignalInfo signal_info) {
{
std::scoped_lock lk{connection_lock};
if (stopped) {
// Do not notify the debugger about another event.
// It should be ignored.
return false;
}
// Set up the state.
stopped = true;
info = signal_info;
}
// Write a single byte into the pipe to wake up the debug interface.
boost::asio::write(signal_pipe, boost::asio::buffer(&stopped, sizeof(stopped)));
return true;
}
std::span<const u8> ReadFromClient() override {
return ReceiveInto(client_socket, client_data);
}
void WriteToClient(std::span<const u8> data) override {
boost::asio::write(client_socket, boost::asio::buffer(data.data(), data.size_bytes()));
}
void SetActiveThread(Kernel::KThread* thread) override {
active_thread = thread;
}
Kernel::KThread* GetActiveThread() override {
return active_thread;
}
private:
void InitializeServer(u16 port) {
using boost::asio::ip::tcp;
LOG_INFO(Debug_GDBStub, "Starting server on port {}...", port);
// Run the connection thread.
connection_thread = std::jthread([&, port](std::stop_token stop_token) {
try {
// Initialize the listening socket and accept a new client.
tcp::endpoint endpoint{boost::asio::ip::address_v4::any(), port};
tcp::acceptor acceptor{io_context, endpoint};
acceptor.async_accept(client_socket, [](const auto&) {});
io_context.run_one();
io_context.restart();
if (stop_token.stop_requested()) {
return;
}
ThreadLoop(stop_token);
} catch (const std::exception& ex) {
LOG_CRITICAL(Debug_GDBStub, "Stopping server: {}", ex.what());
}
});
}
void ShutdownServer() {
connection_thread.request_stop();
io_context.stop();
connection_thread.join();
}
void ThreadLoop(std::stop_token stop_token) {
Common::SetCurrentThreadName("yuzu:Debugger");
// Set up the client signals for new data.
AsyncReceiveInto(signal_pipe, pipe_data, [&](auto d) { PipeData(d); });
AsyncReceiveInto(client_socket, client_data, [&](auto d) { ClientData(d); });
// Set the active thread.
UpdateActiveThread();
// Set up the frontend.
frontend->Connected();
// Main event loop.
while (!stop_token.stop_requested() && io_context.run()) {
}
}
void PipeData(std::span<const u8> data) {
switch (info.type) {
case SignalType::Stopped:
case SignalType::Watchpoint:
// Stop emulation.
PauseEmulation();
// Notify the client.
active_thread = info.thread;
UpdateActiveThread();
if (info.type == SignalType::Watchpoint) {
frontend->Watchpoint(active_thread, *info.watchpoint);
} else {
frontend->Stopped(active_thread);
}
break;
case SignalType::ShuttingDown:
frontend->ShuttingDown();
// Wait for emulation to shut down gracefully now.
signal_pipe.close();
client_socket.shutdown(boost::asio::socket_base::shutdown_both);
LOG_INFO(Debug_GDBStub, "Shut down server");
break;
}
}
void ClientData(std::span<const u8> data) {
const auto actions{frontend->ClientData(data)};
for (const auto action : actions) {
switch (action) {
case DebuggerAction::Interrupt: {
{
std::scoped_lock lk{connection_lock};
stopped = true;
}
PauseEmulation();
UpdateActiveThread();
frontend->Stopped(active_thread);
break;
}
case DebuggerAction::Continue:
MarkResumed([&] { ResumeEmulation(); });
break;
case DebuggerAction::StepThreadUnlocked:
MarkResumed([&] {
active_thread->SetStepState(Kernel::StepState::StepPending);
active_thread->Resume(Kernel::SuspendType::Debug);
ResumeEmulation(active_thread);
});
break;
case DebuggerAction::StepThreadLocked: {
MarkResumed([&] {
active_thread->SetStepState(Kernel::StepState::StepPending);
active_thread->Resume(Kernel::SuspendType::Debug);
});
break;
}
case DebuggerAction::ShutdownEmulation: {
// Spawn another thread that will exit after shutdown,
// to avoid a deadlock
Core::System* system_ref{&system};
std::thread t([system_ref] { system_ref->Exit(); });
t.detach();
break;
}
}
}
}
void PauseEmulation() {
// Put all threads to sleep on next scheduler round.
for (auto* thread : ThreadList()) {
thread->RequestSuspend(Kernel::SuspendType::Debug);
}
// Signal an interrupt so that scheduler will fire.
system.Kernel().InterruptAllPhysicalCores();
}
void ResumeEmulation(Kernel::KThread* except = nullptr) {
// Wake up all threads.
for (auto* thread : ThreadList()) {
if (thread == except) {
continue;
}
thread->SetStepState(Kernel::StepState::NotStepping);
thread->Resume(Kernel::SuspendType::Debug);
}
}
template <typename Callback>
void MarkResumed(Callback&& cb) {
std::scoped_lock lk{connection_lock};
stopped = false;
cb();
}
void UpdateActiveThread() {
const auto& threads{ThreadList()};
if (std::find(threads.begin(), threads.end(), active_thread) == threads.end()) {
active_thread = threads[0];
}
}
const std::vector<Kernel::KThread*>& ThreadList() {
return system.GlobalSchedulerContext().GetThreadList();
}
private:
System& system;
std::unique_ptr<DebuggerFrontend> frontend;
std::jthread connection_thread;
std::mutex connection_lock;
boost::asio::io_context io_context;
boost::process::async_pipe signal_pipe;
boost::asio::ip::tcp::socket client_socket;
SignalInfo info;
Kernel::KThread* active_thread;
bool pipe_data;
bool stopped;
std::array<u8, 4096> client_data;
};
Debugger::Debugger(Core::System& system, u16 port) {
try {
impl = std::make_unique<DebuggerImpl>(system, port);
} catch (const std::exception& ex) {
LOG_CRITICAL(Debug_GDBStub, "Failed to initialize debugger: {}", ex.what());
}
}
Debugger::~Debugger() = default;
bool Debugger::NotifyThreadStopped(Kernel::KThread* thread) {
return impl && impl->SignalDebugger(SignalInfo{SignalType::Stopped, thread, nullptr});
}
bool Debugger::NotifyThreadWatchpoint(Kernel::KThread* thread,
const Kernel::DebugWatchpoint& watch) {
return impl && impl->SignalDebugger(SignalInfo{SignalType::Watchpoint, thread, &watch});
}
void Debugger::NotifyShutdown() {
if (impl) {
impl->SignalDebugger(SignalInfo{SignalType::ShuttingDown, nullptr, nullptr});
}
}
} // namespace Core
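A note on the connection setup above: InitializeServer posts a single async_accept and drives it with run_one() so the owning jthread can still observe stop requests before entering the receive loop. A minimal standalone sketch of that pattern, using only Boost.Asio types (the function name and parameters here are illustrative, not part of the yuzu sources):
#include <boost/asio.hpp>
// Sketch: accept exactly one client, then leave the io_context ready for reuse.
void AcceptOneClient(boost::asio::io_context& io_context,
                     boost::asio::ip::tcp::socket& client_socket, unsigned short port) {
    using boost::asio::ip::tcp;
    tcp::endpoint endpoint{boost::asio::ip::address_v4::any(), port};
    tcp::acceptor acceptor{io_context, endpoint};
    // Post the asynchronous accept; the empty handler only completes the wait.
    acceptor.async_accept(client_socket, [](const boost::system::error_code&) {});
    io_context.run_one(); // Blocks until the accept handler has run.
    io_context.restart(); // Allow the same context to be run again afterwards.
}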

View File

@@ -0,0 +1,52 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include "common/common_types.h"
namespace Kernel {
class KThread;
struct DebugWatchpoint;
} // namespace Kernel
namespace Core {
class System;
class DebuggerImpl;
class Debugger {
public:
/**
* Blocks and waits for a connection on localhost, port `server_port`.
* Does not create the debugger if the port is already in use.
*/
explicit Debugger(Core::System& system, u16 server_port);
~Debugger();
/**
* Notify the debugger that the given thread is stopped
* (due to a breakpoint, or due to stopping after a successful step).
*
* The debugger will asynchronously halt emulation after the notification has
* occurred. If another thread attempts to notify before emulation has stopped,
* it is ignored and this method will return false. Otherwise it will return true.
*/
bool NotifyThreadStopped(Kernel::KThread* thread);
/**
* Notify the debugger that a shutdown is being performed now and disconnect.
*/
void NotifyShutdown();
/**
* Notify the debugger that the given thread has stopped due to hitting a watchpoint.
*/
bool NotifyThreadWatchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint& watch);
private:
std::unique_ptr<DebuggerImpl> impl;
};
} // namespace Core
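For orientation, the return value of NotifyThreadStopped is what keeps two emulated cores from both reporting a stop: only the first caller wins, exactly as the doc comment above states. A hedged sketch of a call site (how the Debugger reference is obtained, and the handler name, are assumptions for illustration):
#include "core/debugger/debugger.h"
#include "core/hle/kernel/k_thread.h"
namespace Core {
// Hypothetical breakpoint handler on the CPU side.
void OnBreakpointHit(Debugger& debugger, Kernel::KThread* current_thread) {
    if (debugger.NotifyThreadStopped(current_thread)) {
        // Accepted: the debugger will asynchronously pause emulation, so this
        // thread should simply let the scheduler suspend it.
    } else {
        // Another thread already reported a stop; nothing further to do here.
    }
}
} // namespace Core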

View File

@@ -0,0 +1,90 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <functional>
#include <span>
#include <vector>
#include "common/common_types.h"
namespace Kernel {
class KThread;
struct DebugWatchpoint;
} // namespace Kernel
namespace Core {
enum class DebuggerAction {
Interrupt, ///< Stop emulation as soon as possible.
Continue, ///< Resume emulation.
StepThreadLocked, ///< Step the currently-active thread without resuming others.
StepThreadUnlocked, ///< Step the currently-active thread and resume others.
ShutdownEmulation, ///< Shut down the emulator.
};
class DebuggerBackend {
public:
virtual ~DebuggerBackend() = default;
/**
* Can be invoked from a callback to synchronously wait for more data.
* Will return as soon as at least one byte is received. Reads up to 4096 bytes.
*/
virtual std::span<const u8> ReadFromClient() = 0;
/**
* Can be invoked from a callback to write data to the client.
* Returns immediately after the data is sent.
*/
virtual void WriteToClient(std::span<const u8> data) = 0;
/**
* Gets the currently active thread when the debugger is stopped.
*/
virtual Kernel::KThread* GetActiveThread() = 0;
/**
* Sets the currently active thread when the debugger is stopped.
*/
virtual void SetActiveThread(Kernel::KThread* thread) = 0;
};
class DebuggerFrontend {
public:
explicit DebuggerFrontend(DebuggerBackend& backend_) : backend{backend_} {}
virtual ~DebuggerFrontend() = default;
/**
* Called after the client has successfully connected to the port.
*/
virtual void Connected() = 0;
/**
* Called when emulation has stopped.
*/
virtual void Stopped(Kernel::KThread* thread) = 0;
/**
* Called when emulation is shutting down.
*/
virtual void ShuttingDown() = 0;
/**
* Called when emulation has stopped on a watchpoint.
*/
virtual void Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint& watch) = 0;
/**
* Called when new data is asynchronously received on the client socket.
* A list of actions to perform is returned.
*/
[[nodiscard]] virtual std::vector<DebuggerAction> ClientData(std::span<const u8> data) = 0;
protected:
DebuggerBackend& backend;
};
} // namespace Core
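To make the backend/frontend split concrete, a frontend only has to implement the five callbacks declared above. A purely illustrative implementation that logs events and never requests actions could look like this (NullDebuggerFrontend is a hypothetical name, not part of the diff):
#include <span>
#include <vector>
#include "common/logging/log.h"
#include "core/debugger/debugger_interface.h"
namespace Core {
class NullDebuggerFrontend final : public DebuggerFrontend {
public:
    explicit NullDebuggerFrontend(DebuggerBackend& backend_) : DebuggerFrontend(backend_) {}
    void Connected() override {}
    void ShuttingDown() override {}
    void Stopped([[maybe_unused]] Kernel::KThread* thread) override {
        LOG_INFO(Debug_GDBStub, "Target stopped");
    }
    void Watchpoint([[maybe_unused]] Kernel::KThread* thread,
                    [[maybe_unused]] const Kernel::DebugWatchpoint& watch) override {
        LOG_INFO(Debug_GDBStub, "Watchpoint hit");
    }
    [[nodiscard]] std::vector<DebuggerAction> ClientData(
        [[maybe_unused]] std::span<const u8> data) override {
        return {}; // Ignore client input; a real frontend parses a wire protocol here.
    }
};
} // namespace Core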

View File

@@ -0,0 +1,718 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <atomic>
#include <numeric>
#include <optional>
#include <thread>
#include <boost/algorithm/string.hpp>
#include "common/hex_util.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/debugger/gdbstub.h"
#include "core/debugger/gdbstub_arch.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_thread.h"
#include "core/loader/loader.h"
#include "core/memory.h"
namespace Core {
constexpr char GDB_STUB_START = '$';
constexpr char GDB_STUB_END = '#';
constexpr char GDB_STUB_ACK = '+';
constexpr char GDB_STUB_NACK = '-';
constexpr char GDB_STUB_INT3 = 0x03;
constexpr int GDB_STUB_SIGTRAP = 5;
constexpr char GDB_STUB_REPLY_ERR[] = "E01";
constexpr char GDB_STUB_REPLY_OK[] = "OK";
constexpr char GDB_STUB_REPLY_EMPTY[] = "";
static u8 CalculateChecksum(std::string_view data) {
return std::accumulate(data.begin(), data.end(), u8{0},
[](u8 lhs, u8 rhs) { return static_cast<u8>(lhs + rhs); });
}
static std::string EscapeGDB(std::string_view data) {
std::string escaped;
escaped.reserve(data.size());
for (char c : data) {
switch (c) {
case '#':
escaped += "}\x03";
break;
case '$':
escaped += "}\x04";
break;
case '*':
escaped += "}\x0a";
break;
case '}':
escaped += "}\x5d";
break;
default:
escaped += c;
break;
}
}
return escaped;
}
static std::string EscapeXML(std::string_view data) {
std::string escaped;
escaped.reserve(data.size());
for (char c : data) {
switch (c) {
case '&':
escaped += "&amp;";
break;
case '"':
escaped += "&quot;";
break;
case '<':
escaped += "&lt;";
break;
case '>':
escaped += "&gt;";
break;
default:
escaped += c;
break;
}
}
return escaped;
}
GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_)
: DebuggerFrontend(backend_), system{system_} {
if (system.CurrentProcess()->Is64BitProcess()) {
arch = std::make_unique<GDBStubA64>();
} else {
arch = std::make_unique<GDBStubA32>();
}
}
GDBStub::~GDBStub() = default;
void GDBStub::Connected() {}
void GDBStub::ShuttingDown() {}
void GDBStub::Stopped(Kernel::KThread* thread) {
SendReply(arch->ThreadStatus(thread, GDB_STUB_SIGTRAP));
}
void GDBStub::Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint& watch) {
const auto status{arch->ThreadStatus(thread, GDB_STUB_SIGTRAP)};
switch (watch.type) {
case Kernel::DebugWatchpointType::Read:
SendReply(fmt::format("{}rwatch:{:x};", status, watch.start_address));
break;
case Kernel::DebugWatchpointType::Write:
SendReply(fmt::format("{}watch:{:x};", status, watch.start_address));
break;
case Kernel::DebugWatchpointType::ReadOrWrite:
default:
SendReply(fmt::format("{}awatch:{:x};", status, watch.start_address));
break;
}
}
std::vector<DebuggerAction> GDBStub::ClientData(std::span<const u8> data) {
std::vector<DebuggerAction> actions;
current_command.insert(current_command.end(), data.begin(), data.end());
while (current_command.size() != 0) {
ProcessData(actions);
}
return actions;
}
void GDBStub::ProcessData(std::vector<DebuggerAction>& actions) {
const char c{current_command[0]};
// Acknowledgement
if (c == GDB_STUB_ACK || c == GDB_STUB_NACK) {
current_command.erase(current_command.begin());
return;
}
// Interrupt
if (c == GDB_STUB_INT3) {
LOG_INFO(Debug_GDBStub, "Received interrupt");
current_command.erase(current_command.begin());
actions.push_back(DebuggerAction::Interrupt);
SendStatus(GDB_STUB_ACK);
return;
}
// Otherwise, require the data to be the start of a command
if (c != GDB_STUB_START) {
LOG_ERROR(Debug_GDBStub, "Invalid command buffer contents: {}", current_command.data());
current_command.clear();
SendStatus(GDB_STUB_NACK);
return;
}
// Continue reading until command is complete
while (CommandEnd() == current_command.end()) {
const auto new_data{backend.ReadFromClient()};
current_command.insert(current_command.end(), new_data.begin(), new_data.end());
}
// Execute and respond to GDB
const auto command{DetachCommand()};
if (command) {
SendStatus(GDB_STUB_ACK);
ExecuteCommand(*command, actions);
} else {
SendStatus(GDB_STUB_NACK);
}
}
void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction>& actions) {
LOG_TRACE(Debug_GDBStub, "Executing command: {}", packet);
if (packet.length() == 0) {
SendReply(GDB_STUB_REPLY_ERR);
return;
}
if (packet.starts_with("vCont")) {
HandleVCont(packet.substr(5), actions);
return;
}
std::string_view command{packet.substr(1, packet.size())};
switch (packet[0]) {
case 'H': {
Kernel::KThread* thread{nullptr};
s64 thread_id{strtoll(command.data() + 1, nullptr, 16)};
if (thread_id >= 1) {
thread = GetThreadByID(thread_id);
} else {
thread = backend.GetActiveThread();
}
if (thread) {
SendReply(GDB_STUB_REPLY_OK);
backend.SetActiveThread(thread);
} else {
SendReply(GDB_STUB_REPLY_ERR);
}
break;
}
case 'T': {
s64 thread_id{strtoll(command.data(), nullptr, 16)};
if (GetThreadByID(thread_id)) {
SendReply(GDB_STUB_REPLY_OK);
} else {
SendReply(GDB_STUB_REPLY_ERR);
}
break;
}
case 'Q':
case 'q':
HandleQuery(command);
break;
case '?':
SendReply(arch->ThreadStatus(backend.GetActiveThread(), GDB_STUB_SIGTRAP));
break;
case 'k':
LOG_INFO(Debug_GDBStub, "Shutting down emulation");
actions.push_back(DebuggerAction::ShutdownEmulation);
break;
case 'g':
SendReply(arch->ReadRegisters(backend.GetActiveThread()));
break;
case 'G':
arch->WriteRegisters(backend.GetActiveThread(), command);
SendReply(GDB_STUB_REPLY_OK);
break;
case 'p': {
const size_t reg{static_cast<size_t>(strtoll(command.data(), nullptr, 16))};
SendReply(arch->RegRead(backend.GetActiveThread(), reg));
break;
}
case 'P': {
const auto sep{std::find(command.begin(), command.end(), '=') - command.begin() + 1};
const size_t reg{static_cast<size_t>(strtoll(command.data(), nullptr, 16))};
arch->RegWrite(backend.GetActiveThread(), reg, std::string_view(command).substr(sep));
SendReply(GDB_STUB_REPLY_OK);
break;
}
case 'm': {
const auto sep{std::find(command.begin(), command.end(), ',') - command.begin() + 1};
const size_t addr{static_cast<size_t>(strtoll(command.data(), nullptr, 16))};
const size_t size{static_cast<size_t>(strtoll(command.data() + sep, nullptr, 16))};
if (system.Memory().IsValidVirtualAddressRange(addr, size)) {
std::vector<u8> mem(size);
system.Memory().ReadBlock(addr, mem.data(), size);
SendReply(Common::HexToString(mem));
} else {
SendReply(GDB_STUB_REPLY_ERR);
}
break;
}
case 'M': {
const auto size_sep{std::find(command.begin(), command.end(), ',') - command.begin() + 1};
const auto mem_sep{std::find(command.begin(), command.end(), ':') - command.begin() + 1};
const size_t addr{static_cast<size_t>(strtoll(command.data(), nullptr, 16))};
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
const auto mem_substr{std::string_view(command).substr(mem_sep)};
const auto mem{Common::HexStringToVector(mem_substr, false)};
if (system.Memory().IsValidVirtualAddressRange(addr, size)) {
system.Memory().WriteBlock(addr, mem.data(), size);
system.InvalidateCpuInstructionCacheRange(addr, size);
SendReply(GDB_STUB_REPLY_OK);
} else {
SendReply(GDB_STUB_REPLY_ERR);
}
break;
}
case 's':
actions.push_back(DebuggerAction::StepThreadLocked);
break;
case 'C':
case 'c':
actions.push_back(DebuggerAction::Continue);
break;
case 'Z':
HandleBreakpointInsert(command);
break;
case 'z':
HandleBreakpointRemove(command);
break;
default:
SendReply(GDB_STUB_REPLY_EMPTY);
break;
}
}
enum class BreakpointType {
Software = 0,
Hardware = 1,
WriteWatch = 2,
ReadWatch = 3,
AccessWatch = 4,
};
void GDBStub::HandleBreakpointInsert(std::string_view command) {
const auto type{static_cast<BreakpointType>(strtoll(command.data(), nullptr, 16))};
const auto addr_sep{std::find(command.begin(), command.end(), ',') - command.begin() + 1};
const auto size_sep{std::find(command.begin() + addr_sep, command.end(), ',') -
command.begin() + 1};
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
if (!system.Memory().IsValidVirtualAddressRange(addr, size)) {
SendReply(GDB_STUB_REPLY_ERR);
return;
}
bool success{};
switch (type) {
case BreakpointType::Software:
replaced_instructions[addr] = system.Memory().Read32(addr);
system.Memory().Write32(addr, arch->BreakpointInstruction());
system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
success = true;
break;
case BreakpointType::WriteWatch:
success = system.CurrentProcess()->InsertWatchpoint(system, addr, size,
Kernel::DebugWatchpointType::Write);
break;
case BreakpointType::ReadWatch:
success = system.CurrentProcess()->InsertWatchpoint(system, addr, size,
Kernel::DebugWatchpointType::Read);
break;
case BreakpointType::AccessWatch:
success = system.CurrentProcess()->InsertWatchpoint(
system, addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
break;
case BreakpointType::Hardware:
default:
SendReply(GDB_STUB_REPLY_EMPTY);
return;
}
if (success) {
SendReply(GDB_STUB_REPLY_OK);
} else {
SendReply(GDB_STUB_REPLY_ERR);
}
}
void GDBStub::HandleBreakpointRemove(std::string_view command) {
const auto type{static_cast<BreakpointType>(strtoll(command.data(), nullptr, 16))};
const auto addr_sep{std::find(command.begin(), command.end(), ',') - command.begin() + 1};
const auto size_sep{std::find(command.begin() + addr_sep, command.end(), ',') -
command.begin() + 1};
const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};
if (!system.Memory().IsValidVirtualAddressRange(addr, size)) {
SendReply(GDB_STUB_REPLY_ERR);
return;
}
bool success{};
switch (type) {
case BreakpointType::Software: {
const auto orig_insn{replaced_instructions.find(addr)};
if (orig_insn != replaced_instructions.end()) {
system.Memory().Write32(addr, orig_insn->second);
system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
replaced_instructions.erase(addr);
success = true;
}
break;
}
case BreakpointType::WriteWatch:
success = system.CurrentProcess()->RemoveWatchpoint(system, addr, size,
Kernel::DebugWatchpointType::Write);
break;
case BreakpointType::ReadWatch:
success = system.CurrentProcess()->RemoveWatchpoint(system, addr, size,
Kernel::DebugWatchpointType::Read);
break;
case BreakpointType::AccessWatch:
success = system.CurrentProcess()->RemoveWatchpoint(
system, addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
break;
case BreakpointType::Hardware:
default:
SendReply(GDB_STUB_REPLY_EMPTY);
return;
}
if (success) {
SendReply(GDB_STUB_REPLY_OK);
} else {
SendReply(GDB_STUB_REPLY_ERR);
}
}
// Structure offsets are from Atmosphere
// See osdbg_thread_local_region.os.horizon.hpp and osdbg_thread_type.os.horizon.hpp
static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
const Kernel::KThread* thread) {
// Read thread type from TLS
const VAddr tls_thread_type{memory.Read32(thread->GetTLSAddress() + 0x1fc)};
const VAddr argument_thread_type{thread->GetArgument()};
if (argument_thread_type && tls_thread_type != argument_thread_type) {
// Probably not created by nnsdk, no name available.
return std::nullopt;
}
if (!tls_thread_type) {
return std::nullopt;
}
const u16 version{memory.Read16(tls_thread_type + 0x26)};
VAddr name_pointer{};
if (version == 1) {
name_pointer = memory.Read32(tls_thread_type + 0xe4);
} else {
name_pointer = memory.Read32(tls_thread_type + 0xe8);
}
if (!name_pointer) {
// No name provided.
return std::nullopt;
}
return memory.ReadCString(name_pointer, 256);
}
static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
const Kernel::KThread* thread) {
// Read thread type from TLS
const VAddr tls_thread_type{memory.Read64(thread->GetTLSAddress() + 0x1f8)};
const VAddr argument_thread_type{thread->GetArgument()};
if (argument_thread_type && tls_thread_type != argument_thread_type) {
// Probably not created by nnsdk, no name available.
return std::nullopt;
}
if (!tls_thread_type) {
return std::nullopt;
}
const u16 version{memory.Read16(tls_thread_type + 0x46)};
VAddr name_pointer{};
if (version == 1) {
name_pointer = memory.Read64(tls_thread_type + 0x1a0);
} else {
name_pointer = memory.Read64(tls_thread_type + 0x1a8);
}
if (!name_pointer) {
// No name provided.
return std::nullopt;
}
return memory.ReadCString(name_pointer, 256);
}
static std::optional<std::string> GetThreadName(Core::System& system,
const Kernel::KThread* thread) {
if (system.CurrentProcess()->Is64BitProcess()) {
return GetNameFromThreadType64(system.Memory(), thread);
} else {
return GetNameFromThreadType32(system.Memory(), thread);
}
}
static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
switch (thread->GetWaitReasonForDebugging()) {
case Kernel::ThreadWaitReasonForDebugging::Sleep:
return "Sleep";
case Kernel::ThreadWaitReasonForDebugging::IPC:
return "IPC";
case Kernel::ThreadWaitReasonForDebugging::Synchronization:
return "Synchronization";
case Kernel::ThreadWaitReasonForDebugging::ConditionVar:
return "ConditionVar";
case Kernel::ThreadWaitReasonForDebugging::Arbitration:
return "Arbitration";
case Kernel::ThreadWaitReasonForDebugging::Suspended:
return "Suspended";
default:
return "Unknown";
}
}
static std::string GetThreadState(const Kernel::KThread* thread) {
switch (thread->GetState()) {
case Kernel::ThreadState::Initialized:
return "Initialized";
case Kernel::ThreadState::Waiting:
return fmt::format("Waiting ({})", GetThreadWaitReason(thread));
case Kernel::ThreadState::Runnable:
return "Runnable";
case Kernel::ThreadState::Terminated:
return "Terminated";
default:
return "Unknown";
}
}
static std::string PaginateBuffer(std::string_view buffer, std::string_view request) {
const auto amount{request.substr(request.find(',') + 1)};
const auto offset_val{static_cast<u64>(strtoll(request.data(), nullptr, 16))};
const auto amount_val{static_cast<u64>(strtoll(amount.data(), nullptr, 16))};
if (offset_val + amount_val > buffer.size()) {
return fmt::format("l{}", buffer.substr(offset_val));
} else {
return fmt::format("m{}", buffer.substr(offset_val, amount_val));
}
}
void GDBStub::HandleQuery(std::string_view command) {
if (command.starts_with("TStatus")) {
// no tracepoint support
SendReply("T0");
} else if (command.starts_with("Supported")) {
SendReply("PacketSize=4000;qXfer:features:read+;qXfer:threads:read+;qXfer:libraries:read+;"
"vContSupported+;QStartNoAckMode+");
} else if (command.starts_with("Xfer:features:read:target.xml:")) {
const auto target_xml{arch->GetTargetXML()};
SendReply(PaginateBuffer(target_xml, command.substr(30)));
} else if (command.starts_with("Offsets")) {
Loader::AppLoader::Modules modules;
system.GetAppLoader().ReadNSOModules(modules);
const auto main = std::find_if(modules.begin(), modules.end(),
[](const auto& key) { return key.second == "main"; });
if (main != modules.end()) {
SendReply(fmt::format("TextSeg={:x}", main->first));
} else {
SendReply(fmt::format("TextSeg={:x}",
system.CurrentProcess()->PageTable().GetCodeRegionStart()));
}
} else if (command.starts_with("Xfer:libraries:read::")) {
Loader::AppLoader::Modules modules;
system.GetAppLoader().ReadNSOModules(modules);
std::string buffer;
buffer += R"(<?xml version="1.0"?>)";
buffer += "<library-list>";
for (const auto& [base, name] : modules) {
buffer += fmt::format(R"(<library name="{}"><segment address="{:#x}"/></library>)",
EscapeXML(name), base);
}
buffer += "</library-list>";
SendReply(PaginateBuffer(buffer, command.substr(21)));
} else if (command.starts_with("fThreadInfo")) {
// beginning of list
const auto& threads = system.GlobalSchedulerContext().GetThreadList();
std::vector<std::string> thread_ids;
for (const auto& thread : threads) {
thread_ids.push_back(fmt::format("{:x}", thread->GetThreadID()));
}
SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
} else if (command.starts_with("sThreadInfo")) {
// end of list
SendReply("l");
} else if (command.starts_with("Xfer:threads:read::")) {
std::string buffer;
buffer += R"(<?xml version="1.0"?>)";
buffer += "<threads>";
const auto& threads = system.GlobalSchedulerContext().GetThreadList();
for (const auto* thread : threads) {
auto thread_name{GetThreadName(system, thread)};
if (!thread_name) {
thread_name = fmt::format("Thread {:d}", thread->GetThreadID());
}
buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
thread->GetThreadID(), thread->GetActiveCore(),
EscapeXML(*thread_name), GetThreadState(thread));
}
buffer += "</threads>";
SendReply(PaginateBuffer(buffer, command.substr(19)));
} else if (command.starts_with("Attached")) {
SendReply("0");
} else if (command.starts_with("StartNoAckMode")) {
no_ack = true;
SendReply(GDB_STUB_REPLY_OK);
} else {
SendReply(GDB_STUB_REPLY_EMPTY);
}
}
void GDBStub::HandleVCont(std::string_view command, std::vector<DebuggerAction>& actions) {
if (command == "?") {
// Continuing and stepping are supported
// (signal is ignored, but required for GDB to use vCont)
SendReply("vCont;c;C;s;S");
return;
}
Kernel::KThread* stepped_thread{nullptr};
bool lock_execution{true};
std::vector<std::string> entries;
boost::split(entries, command.substr(1), boost::is_any_of(";"));
for (const auto& thread_action : entries) {
std::vector<std::string> parts;
boost::split(parts, thread_action, boost::is_any_of(":"));
if (parts.size() == 1 && (parts[0] == "c" || parts[0].starts_with("C"))) {
lock_execution = false;
}
if (parts.size() == 2 && (parts[0] == "s" || parts[0].starts_with("S"))) {
stepped_thread = GetThreadByID(strtoll(parts[1].data(), nullptr, 16));
}
}
if (stepped_thread) {
backend.SetActiveThread(stepped_thread);
actions.push_back(lock_execution ? DebuggerAction::StepThreadLocked
: DebuggerAction::StepThreadUnlocked);
} else {
actions.push_back(DebuggerAction::Continue);
}
}
Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
const auto& threads{system.GlobalSchedulerContext().GetThreadList()};
for (auto* thread : threads) {
if (thread->GetThreadID() == thread_id) {
return thread;
}
}
return nullptr;
}
std::vector<char>::const_iterator GDBStub::CommandEnd() const {
// Find the end marker
const auto end{std::find(current_command.begin(), current_command.end(), GDB_STUB_END)};
// Require the checksum to be present
return (current_command.end() - end) < 3 ? current_command.end() : end + 2;
}
std::optional<std::string> GDBStub::DetachCommand() {
// Slice the string part from the beginning to the end marker
const auto end{CommandEnd()};
// Extract possible command data
std::string data(current_command.data(), end - current_command.begin() + 1);
// Shift over the remaining contents
current_command.erase(current_command.begin(), end + 1);
// Validate received command
if (data[0] != GDB_STUB_START) {
LOG_ERROR(Debug_GDBStub, "Invalid start data: {}", data[0]);
return std::nullopt;
}
u8 calculated = CalculateChecksum(std::string_view(data).substr(1, data.size() - 4));
u8 received = static_cast<u8>(strtoll(data.data() + data.size() - 2, nullptr, 16));
// Verify checksum
if (calculated != received) {
LOG_ERROR(Debug_GDBStub, "Checksum mismatch: calculated {:02x}, received {:02x}",
calculated, received);
return std::nullopt;
}
return data.substr(1, data.size() - 4);
}
void GDBStub::SendReply(std::string_view data) {
const auto escaped{EscapeGDB(data)};
const auto output{fmt::format("{}{}{}{:02x}", GDB_STUB_START, escaped, GDB_STUB_END,
CalculateChecksum(escaped))};
LOG_TRACE(Debug_GDBStub, "Writing reply: {}", output);
// C++ string support is complete rubbish
const u8* output_begin = reinterpret_cast<const u8*>(output.data());
const u8* output_end = output_begin + output.size();
backend.WriteToClient(std::span<const u8>(output_begin, output_end));
}
void GDBStub::SendStatus(char status) {
if (no_ack) {
return;
}
std::array<u8, 1> buf = {static_cast<u8>(status)};
LOG_TRACE(Debug_GDBStub, "Writing status: {}", status);
backend.WriteToClient(buf);
}
} // namespace Core
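For reference, the reply framing in SendReply/SendStatus above follows the standard GDB remote serial protocol: a packet is '$' plus the escaped payload, then '#' and two hex digits of the modulo-256 byte sum of the payload. A self-contained sketch of just the framing step (standard library only; escaping is assumed to have been applied already, as EscapeGDB does):
#include <cstdint>
#include <cstdio>
#include <numeric>
#include <string>
#include <string_view>
// Frame an already-escaped payload as $<payload>#<checksum>.
std::string FramePacket(std::string_view payload) {
    const auto checksum = std::accumulate(
        payload.begin(), payload.end(), std::uint8_t{0}, [](std::uint8_t sum, char c) {
            return static_cast<std::uint8_t>(sum + static_cast<std::uint8_t>(c));
        });
    char suffix[4];
    std::snprintf(suffix, sizeof(suffix), "#%02x", static_cast<unsigned>(checksum));
    return "$" + std::string{payload} + suffix;
}
// FramePacket("OK") == "$OK#9a"  ('O' + 'K' = 0x4f + 0x4b = 0x9a)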

View File

@@ -0,0 +1,52 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <map>
#include <memory>
#include <optional>
#include <string_view>
#include <vector>
#include "core/debugger/debugger_interface.h"
#include "core/debugger/gdbstub_arch.h"
namespace Core {
class System;
class GDBStub : public DebuggerFrontend {
public:
explicit GDBStub(DebuggerBackend& backend, Core::System& system);
~GDBStub() override;
void Connected() override;
void Stopped(Kernel::KThread* thread) override;
void ShuttingDown() override;
void Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint& watch) override;
std::vector<DebuggerAction> ClientData(std::span<const u8> data) override;
private:
void ProcessData(std::vector<DebuggerAction>& actions);
void ExecuteCommand(std::string_view packet, std::vector<DebuggerAction>& actions);
void HandleVCont(std::string_view command, std::vector<DebuggerAction>& actions);
void HandleQuery(std::string_view command);
void HandleBreakpointInsert(std::string_view command);
void HandleBreakpointRemove(std::string_view command);
std::vector<char>::const_iterator CommandEnd() const;
std::optional<std::string> DetachCommand();
Kernel::KThread* GetThreadByID(u64 thread_id);
void SendReply(std::string_view data);
void SendStatus(char status);
private:
Core::System& system;
std::unique_ptr<GDBStubArch> arch;
std::vector<char> current_command;
std::map<VAddr, u32> replaced_instructions;
bool no_ack{};
};
} // namespace Core
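A note on replaced_instructions: it is the entire software-breakpoint state. Inserting a 'Z0' breakpoint saves the original 32-bit instruction word and patches in the architecture's trap opcode; removal restores the saved word. A condensed model of that bookkeeping, detached from the memory subsystem (the read/write callbacks stand in for system.Memory(), and instruction-cache invalidation is omitted):
#include <cstdint>
#include <functional>
#include <map>
struct SoftwareBreakpoints {
    std::map<std::uint64_t, std::uint32_t> replaced_instructions;
    std::function<std::uint32_t(std::uint64_t)> read32;        // stand-in for Memory().Read32
    std::function<void(std::uint64_t, std::uint32_t)> write32; // stand-in for Memory().Write32
    void Insert(std::uint64_t addr, std::uint32_t trap_opcode) {
        replaced_instructions[addr] = read32(addr); // Save the original instruction.
        write32(addr, trap_opcode);                 // Patch in brk #0 (A64) or the A32 trap.
    }
    bool Remove(std::uint64_t addr) {
        const auto it = replaced_instructions.find(addr);
        if (it == replaced_instructions.end()) {
            return false; // Nothing was installed here.
        }
        write32(addr, it->second); // Restore the original instruction.
        replaced_instructions.erase(it);
        return true;
    }
};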

View File

@@ -0,0 +1,483 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/hex_util.h"
#include "core/debugger/gdbstub_arch.h"
#include "core/hle/kernel/k_thread.h"
namespace Core {
template <typename T>
static T HexToValue(std::string_view hex) {
static_assert(std::is_trivially_copyable_v<T>);
T value{};
const auto mem{Common::HexStringToVector(hex, false)};
std::memcpy(&value, mem.data(), std::min(mem.size(), sizeof(T)));
return value;
}
template <typename T>
static std::string ValueToHex(const T value) {
static_assert(std::is_trivially_copyable_v<T>);
std::array<u8, sizeof(T)> mem{};
std::memcpy(mem.data(), &value, sizeof(T));
return Common::HexToString(mem);
}
template <typename T>
static T GetSIMDRegister(const std::array<u32, 64>& simd_regs, size_t offset) {
static_assert(std::is_trivially_copyable_v<T>);
T value{};
std::memcpy(&value, reinterpret_cast<const u8*>(simd_regs.data()) + sizeof(T) * offset,
sizeof(T));
return value;
}
template <typename T>
static void PutSIMDRegister(std::array<u32, 64>& simd_regs, size_t offset, const T value) {
static_assert(std::is_trivially_copyable_v<T>);
std::memcpy(reinterpret_cast<u8*>(simd_regs.data()) + sizeof(T) * offset, &value, sizeof(T));
}
// For sample XML files see the GDB source /gdb/features
// This XML defines what the registers are for this specific ARM device
std::string GDBStubA64::GetTargetXML() const {
constexpr const char* target_xml =
R"(<?xml version="1.0"?>
<!DOCTYPE target SYSTEM "gdb-target.dtd">
<target version="1.0">
<architecture>aarch64</architecture>
<feature name="org.gnu.gdb.aarch64.core">
<reg name="x0" bitsize="64"/>
<reg name="x1" bitsize="64"/>
<reg name="x2" bitsize="64"/>
<reg name="x3" bitsize="64"/>
<reg name="x4" bitsize="64"/>
<reg name="x5" bitsize="64"/>
<reg name="x6" bitsize="64"/>
<reg name="x7" bitsize="64"/>
<reg name="x8" bitsize="64"/>
<reg name="x9" bitsize="64"/>
<reg name="x10" bitsize="64"/>
<reg name="x11" bitsize="64"/>
<reg name="x12" bitsize="64"/>
<reg name="x13" bitsize="64"/>
<reg name="x14" bitsize="64"/>
<reg name="x15" bitsize="64"/>
<reg name="x16" bitsize="64"/>
<reg name="x17" bitsize="64"/>
<reg name="x18" bitsize="64"/>
<reg name="x19" bitsize="64"/>
<reg name="x20" bitsize="64"/>
<reg name="x21" bitsize="64"/>
<reg name="x22" bitsize="64"/>
<reg name="x23" bitsize="64"/>
<reg name="x24" bitsize="64"/>
<reg name="x25" bitsize="64"/>
<reg name="x26" bitsize="64"/>
<reg name="x27" bitsize="64"/>
<reg name="x28" bitsize="64"/>
<reg name="x29" bitsize="64"/>
<reg name="x30" bitsize="64"/>
<reg name="sp" bitsize="64" type="data_ptr"/>
<reg name="pc" bitsize="64" type="code_ptr"/>
<flags id="cpsr_flags" size="4">
<field name="SP" start="0" end="0"/>
<field name="" start="1" end="1"/>
<field name="EL" start="2" end="3"/>
<field name="nRW" start="4" end="4"/>
<field name="" start="5" end="5"/>
<field name="F" start="6" end="6"/>
<field name="I" start="7" end="7"/>
<field name="A" start="8" end="8"/>
<field name="D" start="9" end="9"/>
<field name="IL" start="20" end="20"/>
<field name="SS" start="21" end="21"/>
<field name="V" start="28" end="28"/>
<field name="C" start="29" end="29"/>
<field name="Z" start="30" end="30"/>
<field name="N" start="31" end="31"/>
</flags>
<reg name="cpsr" bitsize="32" type="cpsr_flags"/>
</feature>
<feature name="org.gnu.gdb.aarch64.fpu">
<vector id="v2d" type="ieee_double" count="2"/>
<vector id="v2u" type="uint64" count="2"/>
<vector id="v2i" type="int64" count="2"/>
<vector id="v4f" type="ieee_single" count="4"/>
<vector id="v4u" type="uint32" count="4"/>
<vector id="v4i" type="int32" count="4"/>
<vector id="v8u" type="uint16" count="8"/>
<vector id="v8i" type="int16" count="8"/>
<vector id="v16u" type="uint8" count="16"/>
<vector id="v16i" type="int8" count="16"/>
<vector id="v1u" type="uint128" count="1"/>
<vector id="v1i" type="int128" count="1"/>
<union id="vnd">
<field name="f" type="v2d"/>
<field name="u" type="v2u"/>
<field name="s" type="v2i"/>
</union>
<union id="vns">
<field name="f" type="v4f"/>
<field name="u" type="v4u"/>
<field name="s" type="v4i"/>
</union>
<union id="vnh">
<field name="u" type="v8u"/>
<field name="s" type="v8i"/>
</union>
<union id="vnb">
<field name="u" type="v16u"/>
<field name="s" type="v16i"/>
</union>
<union id="vnq">
<field name="u" type="v1u"/>
<field name="s" type="v1i"/>
</union>
<union id="aarch64v">
<field name="d" type="vnd"/>
<field name="s" type="vns"/>
<field name="h" type="vnh"/>
<field name="b" type="vnb"/>
<field name="q" type="vnq"/>
</union>
<reg name="v0" bitsize="128" type="aarch64v" regnum="34"/>
<reg name="v1" bitsize="128" type="aarch64v" />
<reg name="v2" bitsize="128" type="aarch64v" />
<reg name="v3" bitsize="128" type="aarch64v" />
<reg name="v4" bitsize="128" type="aarch64v" />
<reg name="v5" bitsize="128" type="aarch64v" />
<reg name="v6" bitsize="128" type="aarch64v" />
<reg name="v7" bitsize="128" type="aarch64v" />
<reg name="v8" bitsize="128" type="aarch64v" />
<reg name="v9" bitsize="128" type="aarch64v" />
<reg name="v10" bitsize="128" type="aarch64v"/>
<reg name="v11" bitsize="128" type="aarch64v"/>
<reg name="v12" bitsize="128" type="aarch64v"/>
<reg name="v13" bitsize="128" type="aarch64v"/>
<reg name="v14" bitsize="128" type="aarch64v"/>
<reg name="v15" bitsize="128" type="aarch64v"/>
<reg name="v16" bitsize="128" type="aarch64v"/>
<reg name="v17" bitsize="128" type="aarch64v"/>
<reg name="v18" bitsize="128" type="aarch64v"/>
<reg name="v19" bitsize="128" type="aarch64v"/>
<reg name="v20" bitsize="128" type="aarch64v"/>
<reg name="v21" bitsize="128" type="aarch64v"/>
<reg name="v22" bitsize="128" type="aarch64v"/>
<reg name="v23" bitsize="128" type="aarch64v"/>
<reg name="v24" bitsize="128" type="aarch64v"/>
<reg name="v25" bitsize="128" type="aarch64v"/>
<reg name="v26" bitsize="128" type="aarch64v"/>
<reg name="v27" bitsize="128" type="aarch64v"/>
<reg name="v28" bitsize="128" type="aarch64v"/>
<reg name="v29" bitsize="128" type="aarch64v"/>
<reg name="v30" bitsize="128" type="aarch64v"/>
<reg name="v31" bitsize="128" type="aarch64v"/>
<reg name="fpsr" bitsize="32"/>
<reg name="fpcr" bitsize="32"/>
</feature>
</target>)";
return target_xml;
}
std::string GDBStubA64::RegRead(const Kernel::KThread* thread, size_t id) const {
if (!thread) {
return "";
}
const auto& context{thread->GetContext64()};
const auto& gprs{context.cpu_registers};
const auto& fprs{context.vector_registers};
if (id <= SP_REGISTER) {
return ValueToHex(gprs[id]);
} else if (id == PC_REGISTER) {
return ValueToHex(context.pc);
} else if (id == PSTATE_REGISTER) {
return ValueToHex(context.pstate);
} else if (id >= Q0_REGISTER && id < FPSR_REGISTER) {
return ValueToHex(fprs[id - Q0_REGISTER]);
} else if (id == FPSR_REGISTER) {
return ValueToHex(context.fpsr);
} else if (id == FPCR_REGISTER) {
return ValueToHex(context.fpcr);
} else {
return "";
}
}
void GDBStubA64::RegWrite(Kernel::KThread* thread, size_t id, std::string_view value) const {
if (!thread) {
return;
}
auto& context{thread->GetContext64()};
if (id <= SP_REGISTER) {
context.cpu_registers[id] = HexToValue<u64>(value);
} else if (id == PC_REGISTER) {
context.pc = HexToValue<u64>(value);
} else if (id == PSTATE_REGISTER) {
context.pstate = HexToValue<u32>(value);
} else if (id >= Q0_REGISTER && id < FPSR_REGISTER) {
context.vector_registers[id - Q0_REGISTER] = HexToValue<u128>(value);
} else if (id == FPSR_REGISTER) {
context.fpsr = HexToValue<u32>(value);
} else if (id == FPCR_REGISTER) {
context.fpcr = HexToValue<u32>(value);
}
}
std::string GDBStubA64::ReadRegisters(const Kernel::KThread* thread) const {
std::string output;
for (size_t reg = 0; reg <= FPCR_REGISTER; reg++) {
output += RegRead(thread, reg);
}
return output;
}
void GDBStubA64::WriteRegisters(Kernel::KThread* thread, std::string_view register_data) const {
for (size_t i = 0, reg = 0; reg <= FPCR_REGISTER; reg++) {
if (reg <= SP_REGISTER || reg == PC_REGISTER) {
RegWrite(thread, reg, register_data.substr(i, 16));
i += 16;
} else if (reg == PSTATE_REGISTER || reg == FPCR_REGISTER || reg == FPSR_REGISTER) {
RegWrite(thread, reg, register_data.substr(i, 8));
i += 8;
} else if (reg >= Q0_REGISTER && reg < FPCR_REGISTER) {
RegWrite(thread, reg, register_data.substr(i, 32));
i += 32;
}
}
}
std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID());
}
u32 GDBStubA64::BreakpointInstruction() const {
// A64: brk #0
return 0xd4200000;
}
std::string GDBStubA32::GetTargetXML() const {
constexpr const char* target_xml =
R"(<?xml version="1.0"?>
<!DOCTYPE target SYSTEM "gdb-target.dtd">
<target version="1.0">
<architecture>arm</architecture>
<feature name="org.gnu.gdb.arm.core">
<reg name="r0" bitsize="32" type="uint32"/>
<reg name="r1" bitsize="32" type="uint32"/>
<reg name="r2" bitsize="32" type="uint32"/>
<reg name="r3" bitsize="32" type="uint32"/>
<reg name="r4" bitsize="32" type="uint32"/>
<reg name="r5" bitsize="32" type="uint32"/>
<reg name="r6" bitsize="32" type="uint32"/>
<reg name="r7" bitsize="32" type="uint32"/>
<reg name="r8" bitsize="32" type="uint32"/>
<reg name="r9" bitsize="32" type="uint32"/>
<reg name="r10" bitsize="32" type="uint32"/>
<reg name="r11" bitsize="32" type="uint32"/>
<reg name="r12" bitsize="32" type="uint32"/>
<reg name="sp" bitsize="32" type="data_ptr"/>
<reg name="lr" bitsize="32" type="code_ptr"/>
<reg name="pc" bitsize="32" type="code_ptr"/>
<!-- The CPSR is register 25, rather than register 16, because
the FPA registers historically were placed between the PC
and the CPSR in the "g" packet. -->
<reg name="cpsr" bitsize="32" regnum="25"/>
</feature>
<feature name="org.gnu.gdb.arm.vfp">
<vector id="neon_uint8x8" type="uint8" count="8"/>
<vector id="neon_uint16x4" type="uint16" count="4"/>
<vector id="neon_uint32x2" type="uint32" count="2"/>
<vector id="neon_float32x2" type="ieee_single" count="2"/>
<union id="neon_d">
<field name="u8" type="neon_uint8x8"/>
<field name="u16" type="neon_uint16x4"/>
<field name="u32" type="neon_uint32x2"/>
<field name="u64" type="uint64"/>
<field name="f32" type="neon_float32x2"/>
<field name="f64" type="ieee_double"/>
</union>
<vector id="neon_uint8x16" type="uint8" count="16"/>
<vector id="neon_uint16x8" type="uint16" count="8"/>
<vector id="neon_uint32x4" type="uint32" count="4"/>
<vector id="neon_uint64x2" type="uint64" count="2"/>
<vector id="neon_float32x4" type="ieee_single" count="4"/>
<vector id="neon_float64x2" type="ieee_double" count="2"/>
<union id="neon_q">
<field name="u8" type="neon_uint8x16"/>
<field name="u16" type="neon_uint16x8"/>
<field name="u32" type="neon_uint32x4"/>
<field name="u64" type="neon_uint64x2"/>
<field name="f32" type="neon_float32x4"/>
<field name="f64" type="neon_float64x2"/>
</union>
<reg name="d0" bitsize="64" type="neon_d" regnum="32"/>
<reg name="d1" bitsize="64" type="neon_d"/>
<reg name="d2" bitsize="64" type="neon_d"/>
<reg name="d3" bitsize="64" type="neon_d"/>
<reg name="d4" bitsize="64" type="neon_d"/>
<reg name="d5" bitsize="64" type="neon_d"/>
<reg name="d6" bitsize="64" type="neon_d"/>
<reg name="d7" bitsize="64" type="neon_d"/>
<reg name="d8" bitsize="64" type="neon_d"/>
<reg name="d9" bitsize="64" type="neon_d"/>
<reg name="d10" bitsize="64" type="neon_d"/>
<reg name="d11" bitsize="64" type="neon_d"/>
<reg name="d12" bitsize="64" type="neon_d"/>
<reg name="d13" bitsize="64" type="neon_d"/>
<reg name="d14" bitsize="64" type="neon_d"/>
<reg name="d15" bitsize="64" type="neon_d"/>
<reg name="d16" bitsize="64" type="neon_d"/>
<reg name="d17" bitsize="64" type="neon_d"/>
<reg name="d18" bitsize="64" type="neon_d"/>
<reg name="d19" bitsize="64" type="neon_d"/>
<reg name="d20" bitsize="64" type="neon_d"/>
<reg name="d21" bitsize="64" type="neon_d"/>
<reg name="d22" bitsize="64" type="neon_d"/>
<reg name="d23" bitsize="64" type="neon_d"/>
<reg name="d24" bitsize="64" type="neon_d"/>
<reg name="d25" bitsize="64" type="neon_d"/>
<reg name="d26" bitsize="64" type="neon_d"/>
<reg name="d27" bitsize="64" type="neon_d"/>
<reg name="d28" bitsize="64" type="neon_d"/>
<reg name="d29" bitsize="64" type="neon_d"/>
<reg name="d30" bitsize="64" type="neon_d"/>
<reg name="d31" bitsize="64" type="neon_d"/>
<reg name="q0" bitsize="128" type="neon_q" regnum="64"/>
<reg name="q1" bitsize="128" type="neon_q"/>
<reg name="q2" bitsize="128" type="neon_q"/>
<reg name="q3" bitsize="128" type="neon_q"/>
<reg name="q4" bitsize="128" type="neon_q"/>
<reg name="q5" bitsize="128" type="neon_q"/>
<reg name="q6" bitsize="128" type="neon_q"/>
<reg name="q7" bitsize="128" type="neon_q"/>
<reg name="q8" bitsize="128" type="neon_q"/>
<reg name="q9" bitsize="128" type="neon_q"/>
<reg name="q10" bitsize="128" type="neon_q"/>
<reg name="q10" bitsize="128" type="neon_q"/>
<reg name="q12" bitsize="128" type="neon_q"/>
<reg name="q13" bitsize="128" type="neon_q"/>
<reg name="q14" bitsize="128" type="neon_q"/>
<reg name="q15" bitsize="128" type="neon_q"/>
<reg name="fpscr" bitsize="32" type="int" group="float" regnum="80"/>
</feature>
</target>)";
return target_xml;
}
std::string GDBStubA32::RegRead(const Kernel::KThread* thread, size_t id) const {
if (!thread) {
return "";
}
const auto& context{thread->GetContext32()};
const auto& gprs{context.cpu_registers};
const auto& fprs{context.extension_registers};
if (id <= PC_REGISTER) {
return ValueToHex(gprs[id]);
} else if (id == CPSR_REGISTER) {
return ValueToHex(context.cpsr);
} else if (id >= D0_REGISTER && id < Q0_REGISTER) {
const u64 dN{GetSIMDRegister<u64>(fprs, id - D0_REGISTER)};
return ValueToHex(dN);
} else if (id >= Q0_REGISTER && id < FPSCR_REGISTER) {
const u128 qN{GetSIMDRegister<u128>(fprs, id - Q0_REGISTER)};
return ValueToHex(qN);
} else if (id == FPSCR_REGISTER) {
return ValueToHex(context.fpscr);
} else {
return "";
}
}
void GDBStubA32::RegWrite(Kernel::KThread* thread, size_t id, std::string_view value) const {
if (!thread) {
return;
}
auto& context{thread->GetContext32()};
auto& fprs{context.extension_registers};
if (id <= PC_REGISTER) {
context.cpu_registers[id] = HexToValue<u32>(value);
} else if (id == CPSR_REGISTER) {
context.cpsr = HexToValue<u32>(value);
} else if (id >= D0_REGISTER && id < Q0_REGISTER) {
PutSIMDRegister(fprs, id - D0_REGISTER, HexToValue<u64>(value));
} else if (id >= Q0_REGISTER && id < FPSCR_REGISTER) {
PutSIMDRegister(fprs, id - Q0_REGISTER, HexToValue<u128>(value));
} else if (id == FPSCR_REGISTER) {
context.fpscr = HexToValue<u32>(value);
}
}
std::string GDBStubA32::ReadRegisters(const Kernel::KThread* thread) const {
std::string output;
for (size_t reg = 0; reg <= FPSCR_REGISTER; reg++) {
const bool gpr{reg <= PC_REGISTER};
const bool dfpr{reg >= D0_REGISTER && reg < Q0_REGISTER};
const bool qfpr{reg >= Q0_REGISTER && reg < FPSCR_REGISTER};
if (!(gpr || dfpr || qfpr || reg == CPSR_REGISTER || reg == FPSCR_REGISTER)) {
continue;
}
output += RegRead(thread, reg);
}
return output;
}
void GDBStubA32::WriteRegisters(Kernel::KThread* thread, std::string_view register_data) const {
for (size_t i = 0, reg = 0; reg <= FPSCR_REGISTER; reg++) {
const bool gpr{reg <= PC_REGISTER};
const bool dfpr{reg >= D0_REGISTER && reg < Q0_REGISTER};
const bool qfpr{reg >= Q0_REGISTER && reg < FPSCR_REGISTER};
if (gpr || reg == CPSR_REGISTER || reg == FPSCR_REGISTER) {
RegWrite(thread, reg, register_data.substr(i, 8));
i += 8;
} else if (dfpr) {
RegWrite(thread, reg, register_data.substr(i, 16));
i += 16;
} else if (qfpr) {
RegWrite(thread, reg, register_data.substr(i, 32));
i += 32;
}
if (reg == PC_REGISTER) {
reg = CPSR_REGISTER - 1;
} else if (reg == CPSR_REGISTER) {
reg = D0_REGISTER - 1;
}
}
}
std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID());
}
u32 GDBStubA32::BreakpointInstruction() const {
// A32: trap
// T32: trap + b #4
return 0xe7ffdefe;
}
} // namespace Core
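The register transfer format produced by ValueToHex above is simply the value's bytes, in memory order, rendered as hex pairs; GDB interprets them in target byte order, which for this target is little-endian. A standalone round-trip sketch of the encoding (digit case may differ from Common::HexToString's default):
#include <array>
#include <cstdint>
#include <cstring>
#include <string>
template <typename T>
std::string ValueToHexSketch(const T value) {
    static constexpr char digits[] = "0123456789abcdef";
    std::array<unsigned char, sizeof(T)> bytes{};
    std::memcpy(bytes.data(), &value, sizeof(T)); // Same memcpy as ValueToHex above.
    std::string out;
    for (const unsigned char b : bytes) {
        out += digits[b >> 4];
        out += digits[b & 0xf];
    }
    return out;
}
// On a little-endian host, ValueToHexSketch<std::uint64_t>(0x1122334455667788)
// yields "8877665544332211": least-significant byte first.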

View File

@@ -0,0 +1,68 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include "common/common_types.h"
namespace Kernel {
class KThread;
}
namespace Core {
class GDBStubArch {
public:
virtual ~GDBStubArch() = default;
virtual std::string GetTargetXML() const = 0;
virtual std::string RegRead(const Kernel::KThread* thread, size_t id) const = 0;
virtual void RegWrite(Kernel::KThread* thread, size_t id, std::string_view value) const = 0;
virtual std::string ReadRegisters(const Kernel::KThread* thread) const = 0;
virtual void WriteRegisters(Kernel::KThread* thread, std::string_view register_data) const = 0;
virtual std::string ThreadStatus(const Kernel::KThread* thread, u8 signal) const = 0;
virtual u32 BreakpointInstruction() const = 0;
};
class GDBStubA64 final : public GDBStubArch {
public:
std::string GetTargetXML() const override;
std::string RegRead(const Kernel::KThread* thread, size_t id) const override;
void RegWrite(Kernel::KThread* thread, size_t id, std::string_view value) const override;
std::string ReadRegisters(const Kernel::KThread* thread) const override;
void WriteRegisters(Kernel::KThread* thread, std::string_view register_data) const override;
std::string ThreadStatus(const Kernel::KThread* thread, u8 signal) const override;
u32 BreakpointInstruction() const override;
private:
static constexpr u32 LR_REGISTER = 30;
static constexpr u32 SP_REGISTER = 31;
static constexpr u32 PC_REGISTER = 32;
static constexpr u32 PSTATE_REGISTER = 33;
static constexpr u32 Q0_REGISTER = 34;
static constexpr u32 FPSR_REGISTER = 66;
static constexpr u32 FPCR_REGISTER = 67;
};
class GDBStubA32 final : public GDBStubArch {
public:
std::string GetTargetXML() const override;
std::string RegRead(const Kernel::KThread* thread, size_t id) const override;
void RegWrite(Kernel::KThread* thread, size_t id, std::string_view value) const override;
std::string ReadRegisters(const Kernel::KThread* thread) const override;
void WriteRegisters(Kernel::KThread* thread, std::string_view register_data) const override;
std::string ThreadStatus(const Kernel::KThread* thread, u8 signal) const override;
u32 BreakpointInstruction() const override;
private:
static constexpr u32 SP_REGISTER = 13;
static constexpr u32 LR_REGISTER = 14;
static constexpr u32 PC_REGISTER = 15;
static constexpr u32 CPSR_REGISTER = 25;
static constexpr u32 D0_REGISTER = 32;
static constexpr u32 Q0_REGISTER = 64;
static constexpr u32 FPSCR_REGISTER = 80;
};
} // namespace Core
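As a sanity check on the register numbering above, the full A64 'g' packet produced by ReadRegisters has a fixed size that follows directly from these constants:
//   x0..x30, sp, pc : 33 registers * 16 hex chars =  528
//   pstate          :  1 register  *  8 hex chars =    8
//   v0..v31         : 32 registers * 32 hex chars = 1024
//   fpsr, fpcr      :  2 registers *  8 hex chars =   16
//   total                                         = 1576 characters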

View File

@@ -419,7 +419,7 @@ std::optional<Core::Crypto::Key128> NCA::GetKeyAreaKey(NCASectionCryptoType type
Core::Crypto::Mode::ECB);
cipher.Transcode(key_area.data(), key_area.size(), key_area.data(), Core::Crypto::Op::Decrypt);
Core::Crypto::Key128 out;
Core::Crypto::Key128 out{};
if (type == NCASectionCryptoType::XTS) {
std::copy(key_area.begin(), key_area.begin() + 0x10, out.begin());
} else if (type == NCASectionCryptoType::CTR || type == NCASectionCryptoType::BKTR) {

View File

@@ -50,7 +50,7 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
low = mid + 1;
}
}
UNREACHABLE_MSG("Offset could not be found in BKTR block.");
ASSERT_MSG(false, "Offset could not be found in BKTR block.");
return {0, 0};
}
} // Anonymous namespace

View File

@@ -108,7 +108,7 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
// TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
return ContentRecordType::HtmlDocument;
default:
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type);
ASSERT_MSG(false, "Invalid NCAContentType={:02X}", type);
return ContentRecordType{};
}
}

View File

@@ -144,7 +144,7 @@ VirtualFile RealVfsFilesystem::MoveFile(std::string_view old_path_, std::string_
LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", new_path);
}
} else {
UNREACHABLE();
ASSERT(false);
return nullptr;
}

View File

@@ -65,7 +65,7 @@ void DefaultControllerApplet::ReconfigureControllers(std::function<void()> callb
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::Handheld);
controller->Connect(true);
} else {
UNREACHABLE_MSG("Unable to add a new controller based on the given parameters!");
ASSERT_MSG(false, "Unable to add a new controller based on the given parameters!");
}
}

View File

@@ -25,6 +25,9 @@ constexpr std::array<s32, Common::BitSize<u64>()> VirtualToPhysicalCoreMap{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
};
// Cortex-A57 supports 4 memory watchpoints
constexpr u64 NUM_WATCHPOINTS = 4;
} // namespace Hardware
} // namespace Core

View File

@@ -48,7 +48,7 @@ EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type) {
return handheld.get();
case NpadIdType::Invalid:
default:
UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
return nullptr;
}
}
@@ -77,7 +77,7 @@ const EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type
return handheld.get();
case NpadIdType::Invalid:
default:
UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
return nullptr;
}
}

View File

@@ -141,7 +141,7 @@ public:
if (index < DomainHandlerCount()) {
domain_handlers[index] = nullptr;
} else {
UNREACHABLE_MSG("Unexpected handler index {}", index);
ASSERT_MSG(false, "Unexpected handler index {}", index);
}
}

View File

@@ -244,7 +244,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
// If we somehow get an invalid type, abort.
default:
UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
ASSERT_MSG(false, "Unknown slab type: {}", slab_types[i]);
}
// If we've hit the end of a gap, free it.

View File

@@ -234,7 +234,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
@@ -287,7 +287,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{

View File

@@ -35,7 +35,7 @@ public:
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
}
UNREACHABLE();
ASSERT(false);
return ResultUnknown;
}
@@ -49,7 +49,7 @@ public:
case Svc::ArbitrationType::WaitIfEqual:
return WaitIfEqual(addr, value, timeout);
}
UNREACHABLE();
ASSERT(false);
return ResultUnknown;
}

View File

@@ -84,7 +84,7 @@ u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
}
UNREACHABLE();
ASSERT(false);
return 0;
}
@@ -101,7 +101,7 @@ std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
ASSERT(IsAllowed39BitType(type));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
}
UNREACHABLE();
ASSERT(false);
return 0;
}

View File

@@ -18,7 +18,7 @@ namespace Kernel {
class KernelCore;
class KProcess;
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
\
private: \
friend class ::Kernel::KClassTokenGenerator; \
@@ -40,16 +40,19 @@ public:
static constexpr const char* GetStaticTypeName() { \
return TypeName; \
} \
virtual TypeObj GetTypeObj() const { \
virtual TypeObj GetTypeObj() ATTRIBUTE { \
return GetStaticTypeObj(); \
} \
virtual const char* GetTypeName() const { \
virtual const char* GetTypeName() ATTRIBUTE { \
return GetStaticTypeName(); \
} \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
class KAutoObject {
protected:
class TypeObj {
@@ -82,7 +85,7 @@ protected:
};
private:
KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {

View File
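The macro change in the hunk above exists so that the root KAutoObject can declare GetTypeObj()/GetTypeName() as plain const virtuals while every derived class emits them as const override; roughly, the expansion differs only in that trailing attribute (Derived/Base are placeholder names):
// KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const)
//   -> virtual TypeObj GetTypeObj() const { return GetStaticTypeObj(); }
//
// KERNEL_AUTOOBJECT_TRAITS(Derived, Base)
//   -> KERNEL_AUTOOBJECT_TRAITS_IMPL(Derived, Base, const override)
//   -> virtual TypeObj GetTypeObj() const override { return GetStaticTypeObj(); }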

@@ -49,6 +49,7 @@ private:
}
}
}
UNREACHABLE();
}();
template <typename T>

View File

@@ -27,23 +27,18 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
auto& page_table = m_owner->PageTable();
// Construct the page group.
m_page_group =
KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
m_page_group = {};
// Lock the memory.
R_TRY(page_table.LockForCodeMemory(addr, size))
R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
// Clear the memory.
//
// FIXME: this ends up clobbering address ranges outside the scope of the mapping within
// guest memory, and is not specifically required if the guest program is correctly
// written, so disable until this is further investigated.
//
// for (const auto& block : m_page_group.Nodes()) {
// std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
// }
for (const auto& block : m_page_group.Nodes()) {
std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
}
// Set remaining tracking members.
m_owner->Open();
m_address = addr;
m_is_initialized = true;
m_is_owner_mapped = false;
@@ -57,8 +52,14 @@ void KCodeMemory::Finalize() {
// Unlock.
if (!m_is_mapped && !m_is_owner_mapped) {
const size_t size = m_page_group.GetNumPages() * PageSize;
m_owner->PageTable().UnlockForCodeMemory(m_address, size);
m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
}
// Close the page group.
m_page_group = {};
// Close our reference to our owner.
m_owner->Close();
}
ResultCode KCodeMemory::Map(VAddr address, size_t size) {
@@ -118,7 +119,8 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
k_perm = KMemoryPermission::UserReadExecute;
break;
default:
break;
// Already validated by ControlCodeMemory svc
UNREACHABLE();
}
// Map the memory.

View File

@@ -106,7 +106,7 @@ KConditionVariable::KConditionVariable(Core::System& system_)
KConditionVariable::~KConditionVariable() = default;
ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
KThread* owner_thread = GetCurrentThreadPointer(kernel);
// Signal the address.
{
@@ -147,7 +147,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
// Wait for the address.

View File

@@ -15,8 +15,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
return;
}
auto& scheduler = kernel.Scheduler(core_id);
auto& current_thread = *scheduler.GetCurrentThread();
auto& current_thread = GetCurrentThread(kernel);
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
@@ -26,7 +25,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
process->PinCurrentThread(core_id);
// Set the interrupt flag for the thread.
scheduler.GetCurrentThread()->SetInterruptFlag();
GetCurrentThread(kernel).SetInterruptFlag();
}
}

View File

@@ -29,7 +29,7 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
} else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
return KMemoryManager::Pool::SystemNonSecure;
} else {
UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
return {};
}
}

View File

@@ -35,7 +35,7 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
case FileSys::ProgramAddressSpaceType::Is39Bit:
return 39;
default:
UNREACHABLE();
ASSERT(false);
return {};
}
}
@@ -65,7 +65,6 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
ASSERT(start <= code_addr);
ASSERT(code_addr < code_addr + code_size);
ASSERT(code_addr + code_size - 1 <= end - 1);
@@ -128,7 +127,7 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
const std::size_t needed_size{
(alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
if (alloc_size < needed_size) {
UNREACHABLE();
ASSERT(false);
return ResultOutOfMemory;
}
@@ -542,6 +541,95 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
return ResultSuccess;
}
bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
const auto& pg = pg_ll.Nodes();
const auto& memory_layout = system.Kernel().MemoryLayout();
// Empty groups are necessarily invalid.
if (pg.empty()) {
return false;
}
// We're going to validate that the group we'd expect is the group we see.
auto cur_it = pg.begin();
PAddr cur_block_address = cur_it->GetAddress();
size_t cur_block_pages = cur_it->GetNumPages();
auto UpdateCurrentIterator = [&]() {
if (cur_block_pages == 0) {
if ((++cur_it) == pg.end()) {
return false;
}
cur_block_address = cur_it->GetAddress();
cur_block_pages = cur_it->GetNumPages();
}
return true;
};
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
return false;
}
// Prepare tracking variables.
PAddr cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
size_t tot_size = cur_size;
// Iterate, comparing expected to actual.
while (tot_size < size) {
if (!page_table_impl.ContinueTraversal(next_entry, context)) {
return false;
}
if (next_entry.phys_addr != (cur_addr + cur_size)) {
const size_t cur_pages = cur_size / PageSize;
if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
return false;
}
if (!UpdateCurrentIterator()) {
return false;
}
if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
return false;
}
cur_block_address += cur_size;
cur_block_pages -= cur_pages;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
// Ensure we compare the right amount for the last block.
if (tot_size > size) {
cur_size -= (tot_size - size);
}
if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
return false;
}
if (!UpdateCurrentIterator()) {
return false;
}
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
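
Aside: the new IsValidPageGroup above re-walks the page table and checks that the physical runs it finds line up, block by block, with the page group captured earlier. Below is a stripped-down, self-contained sketch of that comparison; the Block/Run types, the MatchesPageGroup name, and the assumption that run sizes are page-aligned are illustrative stand-ins, not yuzu code.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t kPageSize = 0x1000;

struct Block { std::uint64_t addr; std::size_t pages; }; // expected page-group entry
struct Run   { std::uint64_t addr; std::size_t size;  }; // physical run found while walking

// Returns true if the runs, concatenated in order, cover exactly the blocks.
// Assumes every run size is a multiple of kPageSize.
bool MatchesPageGroup(const std::vector<Block>& group, const std::vector<Run>& runs) {
    auto it = group.begin();
    if (it == group.end()) {
        return false; // empty groups are invalid
    }
    std::uint64_t cur_addr = it->addr;
    std::size_t cur_pages = it->pages;

    for (const Run& run : runs) {
        std::uint64_t run_addr = run.addr;
        std::size_t run_pages = run.size / kPageSize;
        while (run_pages > 0) {
            if (cur_pages == 0) {
                // Current block exhausted: advance to the next expected block.
                if (++it == group.end()) {
                    return false;
                }
                cur_addr = it->addr;
                cur_pages = it->pages;
            }
            if (cur_addr != run_addr) {
                return false; // mapped physical memory does not line up with the group
            }
            const std::size_t step = std::min(cur_pages, run_pages);
            cur_addr += step * kPageSize;
            run_addr += step * kPageSize;
            cur_pages -= step;
            run_pages -= step;
        }
    }
    // Both sides must be fully consumed.
    return cur_pages == 0 && ++it == group.end();
}
```

This is the check that lets the reworked UnlockForCodeMemory further down hand its captured KPageLinkedList back to UnlockMemory, replacing the old UNIMPLEMENTED_MSG path with a real R_UNLESS validation.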
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
KScopedLightLock lk(general_lock);
@@ -1341,7 +1429,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
new_state = KMemoryState::AliasCodeData;
break;
default:
UNREACHABLE();
ASSERT(false);
}
}
@@ -1687,22 +1775,22 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
return ResultSuccess;
}
ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size) {
return this->LockMemoryAndOpen(
nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
KMemoryAttribute::All, KMemoryAttribute::None,
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
KMemoryAttribute::None,
static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
KMemoryPermission::KernelReadWrite),
KMemoryAttribute::Locked);
}
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
KMemoryAttribute::Locked, nullptr);
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size,
const KPageLinkedList& pg) {
return this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
}
ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
@@ -1734,9 +1822,7 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
VAddr addr{start};
while (addr < start + (num_pages * PageSize)) {
const PAddr paddr{GetPhysicalAddr(addr)};
if (!paddr) {
UNREACHABLE();
}
ASSERT(paddr != 0);
page_linked_list.AddBlock(paddr, 1);
addr += PageSize;
}
@@ -1767,7 +1853,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
break;
default:
UNREACHABLE();
ASSERT(false);
}
addr += size;
@@ -1798,7 +1884,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss
case OperationType::ChangePermissionsAndRefresh:
break;
default:
UNREACHABLE();
ASSERT(false);
}
return ResultSuccess;
}
@@ -1835,7 +1921,6 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
return code_region_start;
default:
UNREACHABLE();
return {};
}
}
@@ -1871,7 +1956,6 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
return code_region_end - code_region_start;
default:
UNREACHABLE();
return {};
}
}
@@ -2125,7 +2209,7 @@ ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_
// Check the page group.
if (pg != nullptr) {
UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
}
// Decide on new perm and attr.

View File

@@ -72,8 +72,8 @@ public:
KMemoryPermission perm, PAddr map_addr = 0);
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageLinkedList& pg);
ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
@@ -178,6 +178,7 @@ private:
const KPageLinkedList* pg);
ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
bool IsValidPageGroup(const KPageLinkedList& pg, VAddr addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();

View File

@@ -60,7 +60,7 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
session_ptr->ClientConnected(server.AcceptSession());
} else {
UNREACHABLE();
ASSERT(false);
}
return ResultSuccess;

View File

@@ -57,14 +57,13 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle;
thread->GetContext64().cpu_registers[1] = thread_handle;
thread->DisableDispatch();
auto& kernel = system.Kernel();
// Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
{
KScopedSchedulerLock lock{kernel};
thread->SetState(ThreadState::Runnable);
if (system.DebuggerEnabled()) {
thread->RequestSuspend(SuspendType::Debug);
}
// Run our thread.
void(thread->Run());
}
} // Anonymous namespace
@@ -177,7 +176,8 @@ void KProcess::PinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
KThread* cur_thread =
kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
@@ -194,7 +194,8 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
KThread* cur_thread =
kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// Unpin it.
cur_thread->Unpin();
@@ -271,11 +272,15 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
shmem->Close();
}
void KProcess::RegisterThread(const KThread* thread) {
void KProcess::RegisterThread(KThread* thread) {
KScopedLightLock lk{list_lock};
thread_list.push_back(thread);
}
void KProcess::UnregisterThread(const KThread* thread) {
void KProcess::UnregisterThread(KThread* thread) {
KScopedLightLock lk{list_lock};
thread_list.remove(thread);
}
@@ -293,6 +298,50 @@ ResultCode KProcess::Reset() {
return ResultSuccess;
}
ResultCode KProcess::SetActivity(ProcessActivity activity) {
// Lock ourselves and the scheduler.
KScopedLightLock lk{state_lock};
KScopedLightLock list_lk{list_lock};
KScopedSchedulerLock sl{kernel};
// Validate our state.
R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
// Either pause or resume.
if (activity == ProcessActivity::Paused) {
// Verify that we're not suspended.
if (is_suspended) {
return ResultInvalidState;
}
// Suspend all threads.
for (auto* thread : GetThreadList()) {
thread->RequestSuspend(SuspendType::Process);
}
// Set ourselves as suspended.
SetSuspended(true);
} else {
ASSERT(activity == ProcessActivity::Runnable);
// Verify that we're suspended.
if (!is_suspended) {
return ResultInvalidState;
}
// Resume all threads.
for (auto* thread : GetThreadList()) {
thread->Resume(SuspendType::Process);
}
// Set ourselves as resumed.
SetSuspended(false);
}
return ResultSuccess;
}
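
For reference, the new SetActivity boils down to a guarded fan-out over the thread list plus a process-level suspended flag. A minimal stand-alone sketch of that shape, using hypothetical FakeThread/FakeProcess types rather than the real kernel objects (and a bool in place of ResultInvalidState):

```cpp
#include <vector>

enum class Activity { Runnable, Paused };

struct FakeThread {
    bool suspended = false;
    void RequestSuspend() { suspended = true; }
    void Resume() { suspended = false; }
};

struct FakeProcess {
    std::vector<FakeThread> threads;
    bool is_suspended = false;

    // Returns false where the real code returns ResultInvalidState.
    bool SetActivity(Activity activity) {
        if (activity == Activity::Paused) {
            if (is_suspended) {
                return false; // already paused
            }
            for (auto& thread : threads) {
                thread.RequestSuspend();
            }
            is_suspended = true;
        } else {
            if (!is_suspended) {
                return false; // already runnable
            }
            for (auto& thread : threads) {
                thread.Resume();
            }
            is_suspended = false;
        }
        return true;
    }
};
```

The real version additionally takes the state, list, and scheduler locks up front and rejects exiting/exited processes, which is what makes the fan-out safe against concurrent thread creation and teardown.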
ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
std::size_t code_size) {
program_id = metadata.GetTitleID();
@@ -346,7 +395,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
break;
default:
UNREACHABLE();
ASSERT(false);
}
// Create TLS region
@@ -373,11 +422,11 @@ void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
for (auto& thread : in_thread_list) {
for (auto* thread : in_thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
if (thread == kernel.CurrentScheduler()->GetCurrentThread())
if (thread == GetCurrentThreadPointer(kernel))
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@@ -532,6 +581,52 @@ ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) {
return ResultSuccess;
}
bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
DebugWatchpointType type) {
const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
return wp.type == DebugWatchpointType::None;
})};
if (watch == watchpoints.end()) {
return false;
}
watch->start_address = addr;
watch->end_address = addr + size;
watch->type = type;
for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
debug_page_refcounts[page]++;
system.Memory().MarkRegionDebug(page, PageSize, true);
}
return true;
}
bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
DebugWatchpointType type) {
const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
})};
if (watch == watchpoints.end()) {
return false;
}
watch->start_address = 0;
watch->end_address = 0;
watch->type = DebugWatchpointType::None;
for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
debug_page_refcounts[page]--;
if (!debug_page_refcounts[page]) {
system.Memory().MarkRegionDebug(page, PageSize, false);
}
}
return true;
}
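
The watchpoint helpers above combine two pieces of bookkeeping: a fixed pool of hardware-style slots and a per-page reference count, so MarkRegionDebug is only toggled on the first watchpoint touching a page and the removal of the last one. A condensed, self-contained sketch follows; all types are hypothetical stand-ins, and the slot count of 4 is an assumption (the real value comes from Core::Hardware::NUM_WATCHPOINTS).

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <map>

constexpr std::uint64_t kPageSize = 0x1000;
constexpr std::size_t kNumWatchpoints = 4; // assumption for the sketch

enum class WatchType : std::uint8_t { None, Read, Write, ReadOrWrite };

struct Watchpoint {
    std::uint64_t start = 0;
    std::uint64_t end = 0;
    WatchType type = WatchType::None;
};

struct WatchpointTable {
    std::array<Watchpoint, kNumWatchpoints> slots{};
    std::map<std::uint64_t, std::uint64_t> page_refs;

    bool Insert(std::uint64_t addr, std::uint64_t size, WatchType type) {
        for (auto& wp : slots) {
            if (wp.type != WatchType::None) {
                continue; // slot in use
            }
            wp = {addr, addr + size, type};
            for (std::uint64_t page = addr & ~(kPageSize - 1); page < addr + size;
                 page += kPageSize) {
                if (page_refs[page]++ == 0) {
                    // First watchpoint on this page: MarkRegionDebug(page, kPageSize, true)
                }
            }
            return true;
        }
        return false; // no free slot available
    }

    bool Remove(std::uint64_t addr, std::uint64_t size, WatchType type) {
        for (auto& wp : slots) {
            if (wp.start != addr || wp.end != addr + size || wp.type != type) {
                continue;
            }
            wp = {};
            for (std::uint64_t page = addr & ~(kPageSize - 1); page < addr + size;
                 page += kPageSize) {
                if (--page_refs[page] == 0) {
                    // Last watchpoint on this page: MarkRegionDebug(page, kPageSize, false)
                }
            }
            return true;
        }
        return false;
    }
};
```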
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Svc::MemoryPermission permission) {
@@ -552,9 +647,10 @@ bool KProcess::IsSignaled() const {
}
KProcess::KProcess(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_},
page_table{std::make_unique<KPageTable>(kernel_.System())}, handle_table{kernel_},
address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, state_lock{kernel_} {}
: KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
kernel_.System())},
handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
state_lock{kernel_}, list_lock{kernel_} {}
KProcess::~KProcess() = default;

View File

@@ -7,6 +7,7 @@
#include <array>
#include <cstddef>
#include <list>
#include <map>
#include <string>
#include "common/common_types.h"
#include "core/hle/kernel/k_address_arbiter.h"
@@ -63,6 +64,25 @@ enum class ProcessStatus {
DebugBreak,
};
enum class ProcessActivity : u32 {
Runnable,
Paused,
};
enum class DebugWatchpointType : u8 {
None = 0,
Read = 1 << 0,
Write = 1 << 1,
ReadOrWrite = Read | Write,
};
DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType);
struct DebugWatchpoint {
VAddr start_address;
VAddr end_address;
DebugWatchpointType type;
};
class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask> {
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
@@ -282,17 +302,17 @@ public:
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
/// Gets the list of all threads created with this process as their owner.
const std::list<const KThread*>& GetThreadList() const {
std::list<KThread*>& GetThreadList() {
return thread_list;
}
/// Registers a thread as being created under this process,
/// adding it to this process' thread list.
void RegisterThread(const KThread* thread);
void RegisterThread(KThread* thread);
/// Unregisters a thread from this process, removing it
/// from this process' thread list.
void UnregisterThread(const KThread* thread);
void UnregisterThread(KThread* thread);
/// Clears the signaled state of the process if and only if it's signaled.
///
@@ -347,6 +367,8 @@ public:
void DoWorkerTaskImpl();
ResultCode SetActivity(ProcessActivity activity);
void PinCurrentThread(s32 core_id);
void UnpinCurrentThread(s32 core_id);
void UnpinThread(KThread* thread);
@@ -367,6 +389,19 @@ public:
// Frees a used TLS slot identified by the given address
ResultCode DeleteThreadLocalRegion(VAddr addr);
///////////////////////////////////////////////////////////////////////////////////////////////
// Debug watchpoint management
// Attempts to insert a watchpoint into a free slot. Returns false if none are available.
bool InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
// Attempts to remove the watchpoint specified by the given parameters.
bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
return watchpoints;
}
private:
void PinThread(s32 core_id, KThread* thread) {
ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
@@ -442,7 +477,7 @@ private:
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
/// List of threads that are running with this process as their owner.
std::list<const KThread*> thread_list;
std::list<KThread*> thread_list;
/// List of shared memory that are running with this process as their owner.
std::list<KSharedMemoryInfo*> shared_memory_list;
@@ -471,10 +506,13 @@ private:
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{};
std::map<VAddr, u64> debug_page_refcounts;
KThread* exception_thread{};
KLightLock state_lock;
KLightLock list_lock;
using TLPTree =
Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;

View File

@@ -317,7 +317,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
{
KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id);
if (best_thread == GetCurrentThread()) {
if (best_thread == GetCurrentThreadPointer(kernel)) {
best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread);
}
@@ -424,7 +424,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -463,7 +463,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -551,7 +551,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -642,7 +642,7 @@ KScheduler::~KScheduler() {
ASSERT(!idle_thread);
}
KThread* KScheduler::GetCurrentThread() const {
KThread* KScheduler::GetSchedulerCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
}
@@ -654,7 +654,7 @@ u64 KScheduler::GetLastContextSwitchTicks() const {
}
void KScheduler::RescheduleCurrentCore() {
ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
auto& phys_core = system.Kernel().PhysicalCore(core_id);
if (phys_core.IsInterrupted()) {
@@ -665,7 +665,7 @@ void KScheduler::RescheduleCurrentCore() {
if (state.needs_scheduling.load()) {
Schedule();
} else {
GetCurrentThread()->EnableDispatch();
GetCurrentThread(system.Kernel()).EnableDispatch();
guard.Unlock();
}
}
@@ -710,6 +710,7 @@ void KScheduler::Reload(KThread* thread) {
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
cpu_core.SetTlsAddress(thread->GetTLSAddress());
cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
@@ -717,13 +718,18 @@ void KScheduler::Reload(KThread* thread) {
void KScheduler::SwitchContextStep2() {
// Load context of new thread
Reload(GetCurrentThread());
Reload(GetCurrentThreadPointer(system.Kernel()));
RescheduleCurrentCore();
}
void KScheduler::Schedule() {
ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
this->ScheduleImpl();
}
void KScheduler::ScheduleImpl() {
KThread* previous_thread = GetCurrentThread();
KThread* previous_thread = GetCurrentThreadPointer(system.Kernel());
KThread* next_thread = state.highest_priority_thread;
state.needs_scheduling.store(false);
@@ -761,6 +767,7 @@ void KScheduler::ScheduleImpl() {
old_context = &previous_thread->GetHostContext();
// Set the new thread.
SetCurrentThread(system.Kernel(), next_thread);
current_thread.store(next_thread);
guard.Unlock();
@@ -804,6 +811,7 @@ void KScheduler::SwitchToCurrent() {
}
}
auto thread = next_thread ? next_thread : idle_thread;
SetCurrentThread(system.Kernel(), thread);
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
} while (!is_switch_pending());
}
@@ -829,6 +837,7 @@ void KScheduler::Initialize() {
idle_thread = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
idle_thread->EnableDispatch();
}
KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)

View File

@@ -48,18 +48,13 @@ public:
void Reload(KThread* thread);
/// Gets the current running thread
[[nodiscard]] KThread* GetCurrentThread() const;
[[nodiscard]] KThread* GetSchedulerCurrentThread() const;
/// Gets the idle thread
[[nodiscard]] KThread* GetIdleThread() const {
return idle_thread;
}
/// Returns true if the scheduler is idle
[[nodiscard]] bool IsIdle() const {
return GetCurrentThread() == idle_thread;
}
/// Gets the timestamp for the last context switch in ticks.
[[nodiscard]] u64 GetLastContextSwitchTicks() const;
@@ -149,10 +144,7 @@ private:
void RotateScheduledQueue(s32 cpu_core_id, s32 priority);
void Schedule() {
ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
this->ScheduleImpl();
}
void Schedule();
/// Switches the CPU's active thread context to that of the specified thread
void ScheduleImpl();

View File

@@ -97,13 +97,13 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
"object_id {} is too big! This probably means a recent service call "
"to {} needed to return a new interface!",
object_id, name);
UNREACHABLE();
ASSERT(false);
return ResultSuccess; // Ignore error if asserts are off
}
if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
return strong_ptr->HandleSyncRequest(*this, context);
} else {
UNREACHABLE();
ASSERT(false);
return ResultSuccess;
}

View File

@@ -133,7 +133,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
UNIMPLEMENTED();
break;
default:
UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
break;
}
thread_type = type;
@@ -198,6 +198,10 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
resource_limit_release_hint = false;
cpu_time = 0;
// Set debug context.
stack_top = user_stack_top;
argument = arg;
// Clear our stack parameters.
std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
sizeof(StackParameters));
@@ -221,7 +225,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
// Setup the stack parameters.
StackParameters& sp = GetStackParameters();
sp.cur_thread = this;
sp.disable_count = 0;
sp.disable_count = 1;
SetInExceptionHandler();
// Set thread ID.
@@ -263,15 +267,15 @@ ResultCode KThread::InitializeDummyThread(KThread* thread) {
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
Core::CpuManager::GetIdleThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
system.GetCpuManager().GetStartFuncParameter());
}
ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg,
s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
Core::CpuManager::GetSuspendThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
Core::CpuManager::GetShutdownThreadStartFunc(),
system.GetCpuManager().GetStartFuncParameter());
}
ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
@@ -280,7 +284,7 @@ ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
system.GetCpuManager().GetStartFuncParameter());
}
void KThread::PostDestroy(uintptr_t arg) {
@@ -378,7 +382,7 @@ void KThread::FinishTermination() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
core_thread = kernel.Scheduler(i).GetCurrentThread();
core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@@ -627,7 +631,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
++thread_core) {
if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -744,6 +748,19 @@ void KThread::Continue() {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
void KThread::WaitUntilSuspended() {
// Make sure we have a suspend requested.
ASSERT(IsSuspendRequested());
// Loop until the thread is not executing on any core.
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
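
WaitUntilSuspended above simply spins until no scheduler still reports this thread as the one it is currently running, i.e. the thread has genuinely been switched out. A tiny self-contained analogue using an atomic per-core "currently running" pointer (all names here are hypothetical):

```cpp
#include <array>
#include <atomic>
#include <cstddef>

constexpr std::size_t kNumCores = 4;

// Per-core "currently running thread" pointer, published by each core's
// scheduler on every context switch.
std::array<std::atomic<const void*>, kNumCores> current_on_core{};

// Spin until `thread` is no longer the running thread on any core.
void WaitUntilNotRunning(const void* thread) {
    for (std::size_t core = 0; core < kNumCores; ++core) {
        while (current_on_core[core].load(std::memory_order_acquire) == thread) {
            // Busy-wait: the owning core replaces its entry once it reschedules.
        }
    }
}
```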
ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Lock ourselves.
KScopedLightLock lk(activity_pause_lock);
@@ -805,7 +822,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Check if the thread is currently running.
// If it is, we'll need to retry.
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
if (kernel.Scheduler(i).GetCurrentThread() == this) {
if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -1010,8 +1027,6 @@ ResultCode KThread::Run() {
// Set our state and finish.
SetState(ThreadState::Runnable);
DisableDispatch();
return ResultSuccess;
}
}
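
One way to read the disable_count changes in this file (sp.disable_count now starts at 1, and Run no longer touches dispatch itself) is that a thread is born "dispatch-disabled" and a single EnableDispatch from the scheduler on first switch-in brings the count to zero. A toy sketch of that convention, with hypothetical types only:

```cpp
#include <cassert>

struct StackParams {
    int disable_count = 1; // mirrors sp.disable_count = 1 at initialization
};

struct MiniThread {
    StackParams sp;
    void DisableDispatch() { ++sp.disable_count; }
    void EnableDispatch() {
        assert(sp.disable_count > 0);
        --sp.disable_count;
    }
    bool IsDispatchEnabled() const { return sp.disable_count == 0; }
};

int main() {
    MiniThread t;                 // freshly initialized: count is already 1
    assert(!t.IsDispatchEnabled());
    t.EnableDispatch();           // done once by the scheduler on first switch-in
    assert(t.IsDispatchEnabled());
}
```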
@@ -1160,6 +1175,10 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context;
}
void SetCurrentThread(KernelCore& kernel, KThread* thread) {
kernel.SetCurrentEmuThread(thread);
}
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
return kernel.GetCurrentEmuThread();
}

View File

@@ -100,6 +100,13 @@ enum class ThreadWaitReasonForDebugging : u32 {
Suspended, ///< Thread is waiting due to process suspension
};
enum class StepState : u32 {
NotStepping, ///< Thread is not currently stepping
StepPending, ///< Thread will step when next scheduled
StepPerformed, ///< Thread has stepped, waiting to be scheduled again
};
void SetCurrentThread(KernelCore& kernel, KThread* thread);
[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
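
The StepState values above describe debugger-driven single stepping, per their comments: a step is requested, performed when the thread is next scheduled, then observed. A plausible (hypothetical) state machine around them could look like:

```cpp
#include <cassert>

enum class StepState { NotStepping, StepPending, StepPerformed };

struct SteppableThread {
    StepState step_state = StepState::NotStepping;

    // Debugger asks for a single step the next time the thread is scheduled.
    void RequestStep() { step_state = StepState::StepPending; }

    // Core loop, after retiring one instruction while a step was pending.
    void OnInstructionRetired() {
        if (step_state == StepState::StepPending) {
            step_state = StepState::StepPerformed;
        }
    }

    // Debugger has reported the stop to its client and clears the state.
    void AcknowledgeStep() { step_state = StepState::NotStepping; }
};

int main() {
    SteppableThread t;
    t.RequestStep();
    assert(t.step_state == StepState::StepPending);
    t.OnInstructionRetired();
    assert(t.step_state == StepState::StepPerformed);
    t.AcknowledgeStep();
    assert(t.step_state == StepState::NotStepping);
}
```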
@@ -201,6 +208,8 @@ public:
void Continue();
void WaitUntilSuspended();
constexpr void SetSyncedIndex(s32 index) {
synced_index = index;
}
@@ -267,6 +276,14 @@ public:
void SetState(ThreadState state);
[[nodiscard]] StepState GetStepState() const {
return step_state;
}
void SetStepState(StepState state) {
step_state = state;
}
[[nodiscard]] s64 GetLastScheduledTick() const {
return last_scheduled_tick;
}
@@ -646,6 +663,14 @@ public:
void IfDummyThreadTryWait();
void IfDummyThreadEndWait();
[[nodiscard]] uintptr_t GetArgument() const {
return argument;
}
[[nodiscard]] VAddr GetUserStackTop() const {
return stack_top;
}
private:
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
@@ -769,6 +794,7 @@ private:
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
ThreadType thread_type{};
StepState step_state{};
std::mutex dummy_wait_lock;
std::condition_variable dummy_wait_cv;
@@ -776,6 +802,8 @@ private:
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
VAddr mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging wait_reason_for_debugging{};
uintptr_t argument;
VAddr stack_top;
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;

View File

@@ -76,7 +76,7 @@ struct KernelCore::Impl {
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
InitializeSchedulers();
InitializeSuspendThreads();
InitializeShutdownThreads();
InitializePreemption(kernel);
RegisterHostThread();
@@ -143,9 +143,9 @@ struct KernelCore::Impl {
CleanupObject(system_resource_limit);
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
if (suspend_threads[core_id]) {
suspend_threads[core_id]->Close();
suspend_threads[core_id] = nullptr;
if (shutdown_threads[core_id]) {
shutdown_threads[core_id]->Close();
shutdown_threads[core_id] = nullptr;
}
schedulers[core_id]->Finalize();
@@ -212,7 +212,9 @@ struct KernelCore::Impl {
system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes();
const auto sizes{memory_layout->GetTotalAndKernelMemorySizes()};
const auto total_size{sizes.first};
const auto kernel_size{sizes.second};
// If setting the default system values fails, then something seriously wrong has occurred.
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
@@ -245,13 +247,13 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
}
void InitializeSuspendThreads() {
void InitializeShutdownThreads() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
suspend_threads[core_id] = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
shutdown_threads[core_id] = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {},
core_id)
.IsSuccess());
suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
}
}
@@ -329,6 +331,8 @@ struct KernelCore::Impl {
return is_shutting_down.load(std::memory_order_relaxed);
}
static inline thread_local KThread* current_thread{nullptr};
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
@@ -339,7 +343,12 @@ struct KernelCore::Impl {
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
}
return schedulers[thread_id]->GetCurrentThread();
return current_thread;
}
void SetCurrentEmuThread(KThread* thread) {
current_thread = thread;
}
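
The thread_local current_thread member above lets each host thread carry its own current-thread pointer, set by the scheduler right before it switches fibers (see the SetCurrentThread calls in KScheduler earlier in this diff), instead of looking it up through schedulers[thread_id]. A bare-bones sketch with hypothetical types:

```cpp
#include <cassert>

struct GuestThread {
    int id;
};

// One slot per host thread; each emulated core runs on its own host thread.
static thread_local GuestThread* g_current_thread = nullptr;

void SetCurrentThread(GuestThread* thread) {
    g_current_thread = thread;
}

GuestThread* GetCurrentThreadPointer() {
    return g_current_thread;
}

// Called on the scheduler's host thread just before switching into a guest
// thread's fiber, analogous to the SetCurrentThread calls added above.
void SwitchTo(GuestThread* next) {
    SetCurrentThread(next);
    // ... the real code then does Common::Fiber::YieldTo(...) into next's host context ...
}

int main() {
    GuestThread a{1};
    SwitchTo(&a);
    assert(GetCurrentThreadPointer() == &a);
}
```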
void DeriveInitialMemoryLayout() {
@@ -766,7 +775,7 @@ struct KernelCore::Impl {
std::weak_ptr<ServiceThread> default_service_thread;
Common::ThreadWorker service_threads_manager;
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads;
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -917,6 +926,12 @@ const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
return *impl->global_object_list_container;
}
void KernelCore::InterruptAllPhysicalCores() {
for (auto& physical_core : impl->cores) {
physical_core.Interrupt();
}
}
void KernelCore::InvalidateAllInstructionCaches() {
for (auto& physical_core : impl->cores) {
physical_core.ArmInterface().ClearInstructionCache();
@@ -1016,6 +1031,10 @@ KThread* KernelCore::GetCurrentEmuThread() const {
return impl->GetCurrentEmuThread();
}
void KernelCore::SetCurrentEmuThread(KThread* thread) {
impl->SetCurrentEmuThread(thread);
}
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
@@ -1064,22 +1083,29 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
return *impl->hidbus_shared_mem;
}
void KernelCore::Suspend(bool in_suspention) {
const bool should_suspend = exception_exited || in_suspention;
{
KScopedSchedulerLock lock(*this);
const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended);
if (!should_suspend) {
impl->suspend_threads[core_id]->DisableDispatch();
void KernelCore::Suspend(bool suspended) {
const bool should_suspend{exception_exited || suspended};
const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
for (auto* process : GetProcessList()) {
process->SetActivity(activity);
if (should_suspend) {
// Wait for execution to stop
for (auto* thread : process->GetThreadList()) {
thread->WaitUntilSuspended();
}
}
}
}
void KernelCore::ShutdownCores() {
for (auto* thread : impl->shutdown_threads) {
void(thread->Run());
}
InterruptAllPhysicalCores();
}
bool KernelCore::IsMulticore() const {
return impl->is_multicore;
}

View File

@@ -184,6 +184,8 @@ public:
const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
void InterruptAllPhysicalCores();
void InvalidateAllInstructionCaches();
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
@@ -224,6 +226,9 @@ public:
/// Gets the current host_thread/guest_thread pointer.
KThread* GetCurrentEmuThread() const;
/// Sets the current guest_thread pointer.
void SetCurrentEmuThread(KThread* thread);
/// Gets the current host_thread handle.
u32 GetCurrentHostThreadID() const;
@@ -269,12 +274,15 @@ public:
/// Gets the shared memory object for HIDBus services.
const Kernel::KSharedMemory& GetHidBusSharedMem() const;
/// Suspend/unsuspend the OS.
void Suspend(bool in_suspention);
/// Suspend/unsuspend all processes.
void Suspend(bool suspend);
/// Exceptional exit the OS.
/// Exceptional exit all processes.
void ExceptionalExit();
/// Notify emulated CPU cores to shut down.
void ShutdownCores();
bool IsMulticore() const;
bool IsShuttingDown() const;

View File

@@ -15,6 +15,7 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/debugger/debugger.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_code_memory.h"
@@ -326,7 +327,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
auto thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLock lock(kernel);
@@ -336,7 +336,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
}
return thread->GetWaitResult();
return GetCurrentThread(kernel).GetWaitResult();
}
static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -623,10 +623,16 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
handle_debug_buffer(info1, info2);
auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const auto thread_processor_id = current_thread->GetActiveCore();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
}
if (system.DebuggerEnabled()) {
auto* thread = system.Kernel().GetCurrentEmuThread();
system.GetDebugger().NotifyThreadStopped(thread);
thread->RequestSuspend(Kernel::SuspendType::Debug);
}
}
static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
@@ -685,6 +691,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
// 6.0.0+
TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
TotalPhysicalMemoryUsedWithoutSystemResource = 22,
// Homebrew only
MesosphereCurrentProcess = 65001,
};
const auto info_id_type = static_cast<GetInfoType>(info_id);
@@ -877,7 +886,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
const auto& core_timing = system.CoreTiming();
const auto& scheduler = *system.Kernel().CurrentScheduler();
const auto* const current_thread = scheduler.GetCurrentThread();
const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const bool same_thread = current_thread == thread.GetPointerUnsafe();
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
@@ -907,6 +916,27 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
*result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
return ResultSuccess;
}
case GetInfoType::MesosphereCurrentProcess: {
// Verify the input handle is invalid.
R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
// Verify the sub-type is valid.
R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
// Get the handle table.
KProcess* current_process = system.Kernel().CurrentProcess();
KHandleTable& handle_table = current_process->GetHandleTable();
// Get a new handle for the current process.
Handle tmp;
R_TRY(handle_table.Add(&tmp, current_process));
// Set the output.
*result = tmp;
// We succeeded.
return ResultSuccess;
}
default:
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
return ResultInvalidEnumValue;
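
For the MesosphereCurrentProcess case added above: on the homebrew side this is queried through svcGetInfo with id 65001, an invalid input handle, and sub-id 0, and the result is a fresh handle to the calling process. A hedged usage sketch follows; it assumes current libnx headers (switch.h, InfoType_MesosphereCurrentProcess, INVALID_HANDLE, R_FAILED, svcCloseHandle) and is not something this diff itself guarantees.

```cpp
#include <switch.h> // libnx; all identifiers below are libnx's, assumed unchanged

// Hypothetical helper: obtain a handle to our own process via the extension.
Handle GetOwnProcessHandleOrInvalid() {
    u64 raw = 0;
    const Result rc = svcGetInfo(&raw, InfoType_MesosphereCurrentProcess, INVALID_HANDLE, 0);
    if (R_FAILED(rc)) {
        return INVALID_HANDLE;
    }
    // Caller is expected to svcCloseHandle() the handle when finished with it.
    return static_cast<Handle>(raw);
}
```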
@@ -1096,7 +1126,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
if (thread->GetRawState() != ThreadState::Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetCurrentThread()) {
if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
current = true;
break;
}
@@ -1719,11 +1749,12 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
/// Exits the current process
static void ExitProcess(Core::System& system) {
auto* current_process = system.Kernel().CurrentProcess();
UNIMPLEMENTED();
LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
"Process has already exited");
system.Exit();
}
static void ExitProcess32(Core::System& system) {
@@ -1843,7 +1874,7 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
static void ExitThread(Core::System& system) {
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
system.GlobalSchedulerContext().RemoveThread(current_thread);
current_thread->Exit();
system.Kernel().UnregisterInUseObject(current_thread);
@@ -1876,7 +1907,7 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
KScheduler::YieldToAnyThread(kernel);
} else {
// Nintendo does nothing at all if an otherwise invalid value is passed.
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
}
@@ -2530,7 +2561,7 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return ResultOutOfRange;
}
const auto* const current_process = system.Kernel().CurrentProcess();
auto* const current_process = system.Kernel().CurrentProcess();
const auto total_copy_size = out_thread_ids_size * sizeof(u64);
if (out_thread_ids_size > 0 &&
@@ -2982,11 +3013,10 @@ static const FunctionDef* GetSVCInfo64(u32 func_num) {
}
void Call(Core::System& system, u32 immediate) {
system.ExitDynarmicProfile();
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
auto* thread = GetCurrentThreadPointer(kernel);
thread->SetIsCallingSvc();
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
@@ -3007,8 +3037,6 @@ void Call(Core::System& system, u32 immediate) {
auto* host_context = thread->GetHostContext().get();
host_context->Rewind();
}
system.EnterDynarmicProfile();
}
} // namespace Kernel::Svc

View File

@@ -686,7 +686,7 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system_,
{66, &ICommonStateGetter::SetCpuBoostMode, "SetCpuBoostMode"},
{67, nullptr, "CancelCpuBoostMode"},
{68, nullptr, "GetBuiltInDisplayType"},
{80, nullptr, "PerformSystemButtonPressingIfInFocus"},
{80, &ICommonStateGetter::PerformSystemButtonPressingIfInFocus, "PerformSystemButtonPressingIfInFocus"},
{90, nullptr, "SetPerformanceConfigurationChangedNotification"},
{91, nullptr, "GetCurrentPerformanceConfiguration"},
{100, nullptr, "SetHandlingHomeButtonShortPressedEnabled"},
@@ -826,6 +826,16 @@ void ICommonStateGetter::SetCpuBoostMode(Kernel::HLERequestContext& ctx) {
apm_sys->SetCpuBoostMode(ctx);
}
void ICommonStateGetter::PerformSystemButtonPressingIfInFocus(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto system_button{rp.PopEnum<SystemButtonType>()};
LOG_WARNING(Service_AM, "(STUBBED) called, system_button={}", system_button);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void ICommonStateGetter::SetRequestExitToLibraryAppletAtExecuteNextProgramEnabled(
Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");

View File

@@ -220,6 +220,18 @@ private:
Docked = 1,
};
// This is nn::am::service::SystemButtonType
enum class SystemButtonType {
None,
HomeButtonShortPressing,
HomeButtonLongPressing,
PowerButtonShortPressing,
PowerButtonLongPressing,
ShutdownSystem,
CaptureButtonShortPressing,
CaptureButtonLongPressing,
};
void GetEventHandle(Kernel::HLERequestContext& ctx);
void ReceiveMessage(Kernel::HLERequestContext& ctx);
void GetCurrentFocusState(Kernel::HLERequestContext& ctx);
@@ -234,6 +246,7 @@ private:
void EndVrModeEx(Kernel::HLERequestContext& ctx);
void GetDefaultDisplayResolution(Kernel::HLERequestContext& ctx);
void SetCpuBoostMode(Kernel::HLERequestContext& ctx);
void PerformSystemButtonPressingIfInFocus(Kernel::HLERequestContext& ctx);
void SetRequestExitToLibraryAppletAtExecuteNextProgramEnabled(Kernel::HLERequestContext& ctx);
std::shared_ptr<AppletMessageQueue> msg_queue;

View File

@@ -178,7 +178,7 @@ ResultCode Controller::GetStatus() const {
}
void Controller::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void Controller::Execute() {

View File

@@ -156,7 +156,7 @@ ResultCode Error::GetStatus() const {
}
void Error::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data!");
ASSERT_MSG(false, "Unexpected interactive applet data!");
}
void Error::Execute() {

View File

@@ -76,7 +76,7 @@ ResultCode Auth::GetStatus() const {
}
void Auth::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data.");
ASSERT_MSG(false, "Unexpected interactive applet data.");
}
void Auth::Execute() {
@@ -175,7 +175,7 @@ ResultCode PhotoViewer::GetStatus() const {
}
void PhotoViewer::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data.");
ASSERT_MSG(false, "Unexpected interactive applet data.");
}
void PhotoViewer::Execute() {

View File

@@ -67,7 +67,7 @@ ResultCode MiiEdit::GetStatus() const {
}
void MiiEdit::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void MiiEdit::Execute() {

View File

@@ -44,7 +44,7 @@ ResultCode ProfileSelect::GetStatus() const {
}
void ProfileSelect::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void ProfileSelect::Execute() {

View File

@@ -71,7 +71,7 @@ void SoftwareKeyboard::Initialize() {
InitializeBackground(applet_mode);
break;
default:
UNREACHABLE_MSG("Invalid LibraryAppletMode={}", applet_mode);
ASSERT_MSG(false, "Invalid LibraryAppletMode={}", applet_mode);
break;
}
}

View File

@@ -279,7 +279,7 @@ void WebBrowser::Initialize() {
InitializeLobby();
break;
default:
UNREACHABLE_MSG("Invalid ShimKind={}", web_arg_header.shim_kind);
ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
break;
}
}
@@ -320,7 +320,7 @@ void WebBrowser::Execute() {
ExecuteLobby();
break;
default:
UNREACHABLE_MSG("Invalid ShimKind={}", web_arg_header.shim_kind);
ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
WebBrowserExit(WebExitReason::EndButtonPressed);
break;
}

View File

@@ -899,7 +899,7 @@ void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
case FileSys::SaveDataSpaceId::TemporaryStorage:
case FileSys::SaveDataSpaceId::ProperSystem:
case FileSys::SaveDataSpaceId::SafeMode:
UNREACHABLE();
ASSERT(false);
}
auto filesystem = std::make_shared<IFileSystem>(system, std::move(dir.Unwrap()),

View File

@@ -1,6 +1,11 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <cstring>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/service/glue/notif.h"
@@ -9,11 +14,11 @@ namespace Service::Glue {
NOTIF_A::NOTIF_A(Core::System& system_) : ServiceFramework{system_, "notif:a"} {
// clang-format off
static const FunctionInfo functions[] = {
{500, nullptr, "RegisterAlarmSetting"},
{510, nullptr, "UpdateAlarmSetting"},
{500, &NOTIF_A::RegisterAlarmSetting, "RegisterAlarmSetting"},
{510, &NOTIF_A::UpdateAlarmSetting, "UpdateAlarmSetting"},
{520, &NOTIF_A::ListAlarmSettings, "ListAlarmSettings"},
{530, nullptr, "LoadApplicationParameter"},
{540, nullptr, "DeleteAlarmSetting"},
{530, &NOTIF_A::LoadApplicationParameter, "LoadApplicationParameter"},
{540, &NOTIF_A::DeleteAlarmSetting, "DeleteAlarmSetting"},
{1000, &NOTIF_A::Initialize, "Initialize"},
};
// clang-format on
@@ -23,21 +28,132 @@ NOTIF_A::NOTIF_A(Core::System& system_) : ServiceFramework{system_, "notif:a"} {
NOTIF_A::~NOTIF_A() = default;
void NOTIF_A::ListAlarmSettings(Kernel::HLERequestContext& ctx) {
// Returns an array of AlarmSetting
constexpr s32 alarm_count = 0;
void NOTIF_A::RegisterAlarmSetting(Kernel::HLERequestContext& ctx) {
const auto alarm_setting_buffer_size = ctx.GetReadBufferSize(0);
const auto application_parameter_size = ctx.GetReadBufferSize(1);
LOG_WARNING(Service_NOTIF, "(STUBBED) called");
ASSERT_MSG(alarm_setting_buffer_size == sizeof(AlarmSetting),
"alarm_setting_buffer_size is not 0x40 bytes");
ASSERT_MSG(application_parameter_size <= sizeof(ApplicationParameter),
"application_parameter_size is bigger than 0x400 bytes");
AlarmSetting new_alarm{};
memcpy(&new_alarm, ctx.ReadBuffer(0).data(), sizeof(AlarmSetting));
// TODO: Count alarms per game id
if (alarms.size() >= max_alarms) {
LOG_ERROR(Service_NOTIF, "Alarm limit reached");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultUnknown);
return;
}
new_alarm.alarm_setting_id = last_alarm_setting_id++;
alarms.push_back(new_alarm);
// TODO: Save application parameter data
LOG_WARNING(Service_NOTIF,
"(STUBBED) called, application_parameter_size={}, setting_id={}, kind={}, muted={}",
application_parameter_size, new_alarm.alarm_setting_id, new_alarm.kind,
new_alarm.muted);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
rb.Push(new_alarm.alarm_setting_id);
}
void NOTIF_A::UpdateAlarmSetting(Kernel::HLERequestContext& ctx) {
const auto alarm_setting_buffer_size = ctx.GetReadBufferSize(0);
const auto application_parameter_size = ctx.GetReadBufferSize(1);
ASSERT_MSG(alarm_setting_buffer_size == sizeof(AlarmSetting),
"alarm_setting_buffer_size is not 0x40 bytes");
ASSERT_MSG(application_parameter_size <= sizeof(ApplicationParameter),
"application_parameter_size is bigger than 0x400 bytes");
AlarmSetting alarm_setting{};
memcpy(&alarm_setting, ctx.ReadBuffer(0).data(), sizeof(AlarmSetting));
const auto alarm_it = GetAlarmFromId(alarm_setting.alarm_setting_id);
if (alarm_it != alarms.end()) {
LOG_DEBUG(Service_NOTIF, "Alarm updated");
*alarm_it = alarm_setting;
// TODO: Save application parameter data
}
LOG_WARNING(Service_NOTIF,
"(STUBBED) called, application_parameter_size={}, setting_id={}, kind={}, muted={}",
application_parameter_size, alarm_setting.alarm_setting_id, alarm_setting.kind,
alarm_setting.muted);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void NOTIF_A::ListAlarmSettings(Kernel::HLERequestContext& ctx) {
LOG_INFO(Service_NOTIF, "called, alarm_count={}", alarms.size());
// TODO: Only return alarms of this game id
ctx.WriteBuffer(alarms);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(ResultSuccess);
rb.Push(alarm_count);
rb.Push(static_cast<u32>(alarms.size()));
}
void NOTIF_A::LoadApplicationParameter(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto alarm_setting_id{rp.Pop<AlarmSettingId>()};
const auto alarm_it = GetAlarmFromId(alarm_setting_id);
if (alarm_it == alarms.end()) {
LOG_ERROR(Service_NOTIF, "Invalid alarm setting id={}", alarm_setting_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultUnknown);
return;
}
// TODO: Read application parameter related to this setting id
ApplicationParameter application_parameter{};
LOG_WARNING(Service_NOTIF, "(STUBBED) called, alarm_setting_id={}", alarm_setting_id);
ctx.WriteBuffer(application_parameter);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
rb.Push(static_cast<u32>(application_parameter.size()));
}
void NOTIF_A::DeleteAlarmSetting(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto alarm_setting_id{rp.Pop<AlarmSettingId>()};
std::erase_if(alarms, [alarm_setting_id](const AlarmSetting& alarm) {
return alarm.alarm_setting_id == alarm_setting_id;
});
LOG_INFO(Service_NOTIF, "called, alarm_setting_id={}", alarm_setting_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void NOTIF_A::Initialize(Kernel::HLERequestContext& ctx) {
// TODO: Load previous alarms from config
LOG_WARNING(Service_NOTIF, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
std::vector<NOTIF_A::AlarmSetting>::iterator NOTIF_A::GetAlarmFromId(
AlarmSettingId alarm_setting_id) {
return std::find_if(alarms.begin(), alarms.end(),
[alarm_setting_id](const AlarmSetting& alarm) {
return alarm.alarm_setting_id == alarm_setting_id;
});
}
} // namespace Service::Glue
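
Behind the IPC plumbing, the notif:a stubs above amount to a small alarm registry: a capped list keyed by an incrementing AlarmSettingId, with update, lookup, and delete by id. A self-contained sketch of just that registry (a simplified MiniAlarm stand-in for AlarmSetting, and bool/optional in place of ResultCodes):

```cpp
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

struct MiniAlarm {
    std::uint16_t id;
    std::uint8_t kind;
    std::uint8_t muted;
};

class AlarmRegistry {
public:
    static constexpr std::size_t kMaxAlarms = 8;

    std::optional<std::uint16_t> Register(MiniAlarm alarm) {
        if (alarms.size() >= kMaxAlarms) {
            return std::nullopt; // mirrors the "Alarm limit reached" error path
        }
        alarm.id = next_id++;
        alarms.push_back(alarm);
        return alarm.id;
    }

    bool Update(const MiniAlarm& alarm) {
        const auto it = Find(alarm.id);
        if (it == alarms.end()) {
            return false;
        }
        *it = alarm;
        return true;
    }

    void Delete(std::uint16_t id) {
        std::erase_if(alarms, [id](const MiniAlarm& a) { return a.id == id; });
    }

private:
    std::vector<MiniAlarm>::iterator Find(std::uint16_t id) {
        return std::find_if(alarms.begin(), alarms.end(),
                            [id](const MiniAlarm& a) { return a.id == id; });
    }

    std::vector<MiniAlarm> alarms;
    std::uint16_t next_id = 0;
};
```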

View File

@@ -3,6 +3,10 @@
#pragma once
#include <array>
#include <vector>
#include "common/uuid.h"
#include "core/hle/service/service.h"
namespace Core {
@@ -17,8 +21,52 @@ public:
~NOTIF_A() override;
private:
static constexpr std::size_t max_alarms = 8;
// This is nn::notification::AlarmSettingId
using AlarmSettingId = u16;
static_assert(sizeof(AlarmSettingId) == 0x2, "AlarmSettingId is an invalid size");
using ApplicationParameter = std::array<u8, 0x400>;
static_assert(sizeof(ApplicationParameter) == 0x400, "ApplicationParameter is an invalid size");
struct DailyAlarmSetting {
s8 hour;
s8 minute;
};
static_assert(sizeof(DailyAlarmSetting) == 0x2, "DailyAlarmSetting is an invalid size");
struct WeeklyScheduleAlarmSetting {
INSERT_PADDING_BYTES(0xA);
std::array<DailyAlarmSetting, 0x7> day_of_week;
};
static_assert(sizeof(WeeklyScheduleAlarmSetting) == 0x18,
"WeeklyScheduleAlarmSetting is an invalid size");
// This is nn::notification::AlarmSetting
struct AlarmSetting {
AlarmSettingId alarm_setting_id;
u8 kind;
u8 muted;
INSERT_PADDING_BYTES(0x4);
Common::UUID account_id;
u64 application_id;
INSERT_PADDING_BYTES(0x8);
WeeklyScheduleAlarmSetting schedule;
};
static_assert(sizeof(AlarmSetting) == 0x40, "AlarmSetting is an invalid size");
void RegisterAlarmSetting(Kernel::HLERequestContext& ctx);
void UpdateAlarmSetting(Kernel::HLERequestContext& ctx);
void ListAlarmSettings(Kernel::HLERequestContext& ctx);
void LoadApplicationParameter(Kernel::HLERequestContext& ctx);
void DeleteAlarmSetting(Kernel::HLERequestContext& ctx);
void Initialize(Kernel::HLERequestContext& ctx);
std::vector<AlarmSetting>::iterator GetAlarmFromId(AlarmSettingId alarm_setting_id);
std::vector<AlarmSetting> alarms{};
AlarmSettingId last_alarm_setting_id{};
};
} // namespace Service::Glue

View File

@@ -61,6 +61,7 @@ void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
}
last_update_timestamp = shared_memory->gesture_lifo.timestamp;
UpdateGestureSharedMemory(gesture, time_difference);
}
void Controller_Gesture::ReadTouchInput() {
@@ -94,8 +95,7 @@ bool Controller_Gesture::ShouldUpdateGesture(const GestureProperties& gesture,
return false;
}
void Controller_Gesture::UpdateGestureSharedMemory(u8* data, std::size_t size,
GestureProperties& gesture,
void Controller_Gesture::UpdateGestureSharedMemory(GestureProperties& gesture,
f32 time_difference) {
GestureType type = GestureType::Idle;
GestureAttribute attributes{};

View File

@@ -107,8 +107,7 @@ private:
bool ShouldUpdateGesture(const GestureProperties& gesture, f32 time_difference);
// Updates the shared memory to the next state
void UpdateGestureSharedMemory(u8* data, std::size_t size, GestureProperties& gesture,
f32 time_difference);
void UpdateGestureSharedMemory(GestureProperties& gesture, f32 time_difference);
// Initializes new gesture
void NewGesture(GestureProperties& gesture, GestureType& type, GestureAttribute& attributes);

View File

@@ -160,7 +160,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
shared_memory->system_properties.raw = 0;
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
shared_memory->style_tag.fullkey.Assign(1);
@@ -422,7 +422,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
libnx_state.connection_status.is_connected.Assign(1);
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
case Core::HID::NpadStyleIndex::NES:
@@ -597,7 +597,7 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
case Core::HID::NpadStyleIndex::Pokeball:
@@ -856,7 +856,7 @@ void Controller_NPad::VibrateController(
}
if (vibration_device_handle.device_index == Core::HID::DeviceIndex::None) {
UNREACHABLE_MSG("DeviceIndex should never be None!");
ASSERT_MSG(false, "DeviceIndex should never be None!");
return;
}

View File

@@ -1441,7 +1441,7 @@ void Hid::GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
break;
case Core::HID::DeviceIndex::None:
default:
UNREACHABLE_MSG("DeviceIndex should never be None!");
ASSERT_MSG(false, "DeviceIndex should never be None!");
vibration_device_info.position = Core::HID::VibrationDevicePosition::None;
break;
}

View File

@@ -5,7 +5,9 @@
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/hid/errors.h"
#include "core/hle/service/hid/irs.h"
namespace Service::HID {
@@ -38,21 +40,32 @@ IRS::IRS(Core::System& system_) : ServiceFramework{system_, "irs"} {
}
void IRS::ActivateIrsensor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::DeactivateIrsensor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::GetIrsensorSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_IRS, "called");
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_IRS, "called, applet_resource_user_id={}", applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(ResultSuccess);
@@ -60,35 +73,109 @@ void IRS::GetIrsensorSharedMemoryHandle(Kernel::HLERequestContext& ctx) {
}
void IRS::StopImageProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
};
static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::RunMomentProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
PackedMomentProcessorConfig processor_config;
};
static_assert(sizeof(Parameters) == 0x30, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::RunClusteringProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
PackedClusteringProcessorConfig processor_config;
};
static_assert(sizeof(Parameters) == 0x40, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::RunImageTransferProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
PackedImageTransferProcessorConfig processor_config;
u32 transfer_memory_size;
};
static_assert(sizeof(Parameters) == 0x30, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
const auto t_mem_handle{ctx.GetCopyHandle(0)};
auto t_mem =
system.CurrentProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(t_mem_handle);
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, transfer_memory_size={}, "
"applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.transfer_memory_size, parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
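Reviewer note: RunImageTransferProcessor pulls a copy handle from the request and resolves it to a KTransferMemory in the caller's handle table, but the stub never validates what it gets back. A hedged sketch of a check that could follow GetObject (IsNull(), GetSize(), and ResultUnknown are assumptions about yuzu's kernel/result API, not part of this diff):

// Hedged sketch only; not committed code. Assumes KScopedAutoObject::IsNull(),
// KTransferMemory::GetSize() and ResultUnknown behave as their names suggest.
const auto t_mem_handle{ctx.GetCopyHandle(0)};
auto t_mem =
    system.CurrentProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(t_mem_handle);
if (t_mem.IsNull()) {
    LOG_ERROR(Service_IRS, "t_mem is a nullptr for handle=0x{:08X}", t_mem_handle);
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(ResultUnknown);
    return;
}
ASSERT_MSG(t_mem->GetSize() == parameters.transfer_memory_size,
           "t_mem has incorrect size");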
void IRS::GetImageTransferProcessorState(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
};
static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 5};
rb.Push(ResultSuccess);
@@ -97,71 +184,195 @@ void IRS::GetImageTransferProcessorState(Kernel::HLERequestContext& ctx) {
}
void IRS::RunTeraPluginProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const auto camera_handle{rp.PopRaw<IrCameraHandle>()};
const auto processor_config{rp.PopRaw<PackedTeraPluginProcessorConfig>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, mode={}, mcu_version={}.{}, "
"applet_resource_user_id={}",
camera_handle.npad_type, camera_handle.npad_id, processor_config.mode,
processor_config.required_mcu_version.major,
processor_config.required_mcu_version.minor, applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::GetNpadIrCameraHandle(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const auto npad_id{rp.PopEnum<Core::HID::NpadIdType>()};
if (npad_id > Core::HID::NpadIdType::Player8 && npad_id != Core::HID::NpadIdType::Invalid &&
npad_id != Core::HID::NpadIdType::Handheld) {
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(InvalidNpadId);
return;
}
IrCameraHandle camera_handle{
.npad_id = static_cast<u8>(NpadIdTypeToIndex(npad_id)),
.npad_type = Core::HID::NpadStyleIndex::None,
};
LOG_WARNING(Service_IRS, "(STUBBED) called, npad_id={}, camera_npad_id={}, camera_npad_type={}",
npad_id, camera_handle.npad_id, camera_handle.npad_type);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(ResultSuccess);
rb.PushRaw<u32>(device_handle);
rb.PushRaw(camera_handle);
}
void IRS::RunPointingProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const auto camera_handle{rp.PopRaw<IrCameraHandle>()};
const auto processor_config{rp.PopRaw<PackedPointingProcessorConfig>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(
Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, mcu_version={}.{}, applet_resource_user_id={}",
camera_handle.npad_type, camera_handle.npad_id, processor_config.required_mcu_version.major,
processor_config.required_mcu_version.minor, applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::SuspendImageProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
};
static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::CheckFirmwareVersion(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const auto camera_handle{rp.PopRaw<IrCameraHandle>()};
const auto mcu_version{rp.PopRaw<PackedMcuVersion>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(
Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, applet_resource_user_id={}, mcu_version={}.{}",
camera_handle.npad_type, camera_handle.npad_id, applet_resource_user_id, mcu_version.major,
mcu_version.minor);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::SetFunctionLevel(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
PackedFunctionLevel function_level;
u64 applet_resource_user_id;
};
static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::RunImageTransferExProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
PackedImageTransferProcessorExConfig processor_config;
u64 transfer_memory_size;
};
static_assert(sizeof(Parameters) == 0x38, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
const auto t_mem_handle{ctx.GetCopyHandle(0)};
auto t_mem =
system.CurrentProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(t_mem_handle);
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, transfer_memory_size={}, "
"applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.transfer_memory_size, parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::RunIrLedProcessor(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
const auto camera_handle{rp.PopRaw<IrCameraHandle>()};
const auto processor_config{rp.PopRaw<PackedIrLedProcessorConfig>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, light_target={}, mcu_version={}.{} "
"applet_resource_user_id={}",
camera_handle.npad_type, camera_handle.npad_id, processor_config.light_target,
processor_config.required_mcu_version.major,
processor_config.required_mcu_version.minor, applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::StopImageProcessorAsync(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
IrCameraHandle camera_handle;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
};
static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
LOG_WARNING(Service_IRS,
"(STUBBED) called, npad_type={}, npad_id={}, applet_resource_user_id={}",
parameters.camera_handle.npad_type, parameters.camera_handle.npad_id,
parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
}
void IRS::ActivateIrsensorWithFunctionLevel(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_IRS, "(STUBBED) called");
IPC::RequestParser rp{ctx};
struct Parameters {
PackedFunctionLevel function_level;
INSERT_PADDING_WORDS_NOINIT(1);
u64 applet_resource_user_id;
};
static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
const auto parameters{rp.PopRaw<Parameters>()};
LOG_WARNING(Service_IRS, "(STUBBED) called, function_level={}, applet_resource_user_id={}",
parameters.function_level.function_level, parameters.applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ResultSuccess);
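Reviewer note: nearly every simple handler in this file pops the same 0x10-byte Parameters block with rp.PopRaw<Parameters>(). A sketch of why that static_assert holds, using simplified stand-in definitions (the real IrCameraHandle stores an NpadStyleIndex enum, not a plain u8):

// Layout sketch with stand-in types; field names mirror the diff.
#include <cstdint>

struct IrCameraHandle {                      // 4 bytes: u8 + u8 + 2 padding bytes
    std::uint8_t npad_id;
    std::uint8_t npad_type;
    std::uint8_t pad[2];
};

struct Parameters {
    IrCameraHandle camera_handle;            // offset 0x0, size 0x4
    std::uint32_t padding_word;              // offset 0x4, keeps the u64 8-byte aligned
    std::uint64_t applet_resource_user_id;   // offset 0x8
};

static_assert(sizeof(Parameters) == 0x10,
              "matches the block popped with rp.PopRaw<Parameters>()");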

@@ -3,6 +3,7 @@
#pragma once
#include "core/hid/hid_types.h"
#include "core/hle/service/service.h"
namespace Core {
@@ -17,6 +18,235 @@ public:
~IRS() override;
private:
// This is nn::irsensor::IrCameraStatus
enum IrCameraStatus : u32 {
Available,
Unsupported,
Unconnected,
};
// This is nn::irsensor::IrCameraInternalStatus
enum IrCameraInternalStatus : u32 {
Stopped,
FirmwareUpdateNeeded,
Unkown2,
Unkown3,
Unkown4,
FirmwareVersionRequested,
FirmwareVersionIsInvalid,
Ready,
Setting,
};
// This is nn::irsensor::detail::StatusManager::IrSensorMode
enum IrSensorMode : u64 {
None,
MomentProcessor,
ClusteringProcessor,
ImageTransferProcessor,
PointingProcessorMarker,
TeraPluginProcessor,
IrLedProcessor,
};
// This is nn::irsensor::ImageProcessorStatus
enum ImageProcessorStatus : u8 {
stopped,
running,
};
// This is nn::irsensor::ImageTransferProcessorFormat
enum ImageTransferProcessorFormat : u8 {
Size320x240,
Size160x120,
Size80x60,
Size40x30,
Size20x15,
};
// This is nn::irsensor::AdaptiveClusteringMode
enum AdaptiveClusteringMode : u8 {
StaticFov,
DynamicFov,
};
// This is nn::irsensor::AdaptiveClusteringTargetDistance
enum AdaptiveClusteringTargetDistance : u8 {
Near,
Middle,
Far,
};
// This is nn::irsensor::IrsHandAnalysisMode
enum IrsHandAnalysisMode : u8 {
Silhouette,
Image,
SilhoueteAndImage,
SilhuetteOnly,
};
// This is nn::irsensor::IrSensorFunctionLevel
enum IrSensorFunctionLevel : u8 {
unknown0,
unknown1,
unknown2,
unknown3,
unknown4,
};
// This is nn::irsensor::IrCameraHandle
struct IrCameraHandle {
u8 npad_id{};
Core::HID::NpadStyleIndex npad_type{Core::HID::NpadStyleIndex::None};
INSERT_PADDING_BYTES(2);
};
static_assert(sizeof(IrCameraHandle) == 4, "IrCameraHandle is an invalid size");
struct IrsRect {
s16 x;
s16 y;
s16 width;
s16 height;
};
// This is nn::irsensor::PackedMcuVersion
struct PackedMcuVersion {
u16 major;
u16 minor;
};
static_assert(sizeof(PackedMcuVersion) == 4, "PackedMcuVersion is an invalid size");
// This is nn::irsensor::MomentProcessorConfig
struct MomentProcessorConfig {
u64 exposire_time;
u8 light_target;
u8 gain;
u8 is_negative_used;
INSERT_PADDING_BYTES(7);
IrsRect window_of_interest;
u8 preprocess;
u8 preprocess_intensity_threshold;
INSERT_PADDING_BYTES(5);
};
static_assert(sizeof(MomentProcessorConfig) == 0x28,
"MomentProcessorConfig is an invalid size");
// This is nn::irsensor::PackedMomentProcessorConfig
struct PackedMomentProcessorConfig {
u64 exposire_time;
u8 light_target;
u8 gain;
u8 is_negative_used;
INSERT_PADDING_BYTES(5);
IrsRect window_of_interest;
PackedMcuVersion required_mcu_version;
u8 preprocess;
u8 preprocess_intensity_threshold;
INSERT_PADDING_BYTES(2);
};
static_assert(sizeof(PackedMomentProcessorConfig) == 0x20,
"PackedMomentProcessorConfig is an invalid size");
// This is nn::irsensor::ClusteringProcessorConfig
struct ClusteringProcessorConfig {
u64 exposire_time;
u32 light_target;
u32 gain;
u8 is_negative_used;
INSERT_PADDING_BYTES(7);
IrsRect window_of_interest;
u32 pixel_count_min;
u32 pixel_count_max;
u32 object_intensity_min;
u8 is_external_light_filter_enabled;
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(ClusteringProcessorConfig) == 0x30,
"ClusteringProcessorConfig is an invalid size");
// This is nn::irsensor::PackedClusteringProcessorConfig
struct PackedClusteringProcessorConfig {
u64 exposire_time;
u8 light_target;
u8 gain;
u8 is_negative_used;
INSERT_PADDING_BYTES(5);
IrsRect window_of_interest;
PackedMcuVersion required_mcu_version;
u32 pixel_count_min;
u32 pixel_count_max;
u32 object_intensity_min;
u8 is_external_light_filter_enabled;
INSERT_PADDING_BYTES(2);
};
static_assert(sizeof(PackedClusteringProcessorConfig) == 0x30,
"PackedClusteringProcessorConfig is an invalid size");
// This is nn::irsensor::PackedImageTransferProcessorConfig
struct PackedImageTransferProcessorConfig {
u64 exposire_time;
u8 light_target;
u8 gain;
u8 is_negative_used;
INSERT_PADDING_BYTES(5);
PackedMcuVersion required_mcu_version;
u8 format;
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(PackedImageTransferProcessorConfig) == 0x18,
"PackedImageTransferProcessorConfig is an invalid size");
// This is nn::irsensor::PackedTeraPluginProcessorConfig
struct PackedTeraPluginProcessorConfig {
PackedMcuVersion required_mcu_version;
u8 mode;
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(PackedTeraPluginProcessorConfig) == 0x8,
"PackedTeraPluginProcessorConfig is an invalid size");
// This is nn::irsensor::PackedPointingProcessorConfig
struct PackedPointingProcessorConfig {
IrsRect window_of_interest;
PackedMcuVersion required_mcu_version;
};
static_assert(sizeof(PackedPointingProcessorConfig) == 0xC,
"PackedPointingProcessorConfig is an invalid size");
// This is nn::irsensor::PackedFunctionLevel
struct PackedFunctionLevel {
IrSensorFunctionLevel function_level;
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(PackedFunctionLevel) == 0x4, "PackedFunctionLevel is an invalid size");
// This is nn::irsensor::PackedImageTransferProcessorExConfig
struct PackedImageTransferProcessorExConfig {
u64 exposire_time;
u8 light_target;
u8 gain;
u8 is_negative_used;
INSERT_PADDING_BYTES(5);
PackedMcuVersion required_mcu_version;
ImageTransferProcessorFormat origin_format;
ImageTransferProcessorFormat trimming_format;
u16 trimming_start_x;
u16 trimming_start_y;
u8 is_external_light_filter_enabled;
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(PackedImageTransferProcessorExConfig) == 0x20,
"PackedImageTransferProcessorExConfig is an invalid size");
// This is nn::irsensor::PackedIrLedProcessorConfig
struct PackedIrLedProcessorConfig {
PackedMcuVersion required_mcu_version;
u8 light_target;
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(PackedIrLedProcessorConfig) == 0x8,
"PackedIrLedProcessorConfig is an invalid size");
void ActivateIrsensor(Kernel::HLERequestContext& ctx);
void DeactivateIrsensor(Kernel::HLERequestContext& ctx);
void GetIrsensorSharedMemoryHandle(Kernel::HLERequestContext& ctx);
@@ -35,8 +265,6 @@ private:
void RunIrLedProcessor(Kernel::HLERequestContext& ctx);
void StopImageProcessorAsync(Kernel::HLERequestContext& ctx);
void ActivateIrsensorWithFunctionLevel(Kernel::HLERequestContext& ctx);
const u32 device_handle{0xABCD};
};
class IRS_SYS final : public ServiceFramework<IRS_SYS> {

@@ -11,10 +11,13 @@
#include "common/alignment.h"
#include "common/common_funcs.h"
#include "common/div_ceil.h"
#include "common/elf.h"
#include "common/logging/log.h"
#include "core/hle/service/jit/jit_context.h"
#include "core/memory.h"
using namespace Common::ELF;
namespace Service::JIT {
constexpr std::array<u8, 8> SVC0_ARM64 = {
@@ -26,25 +29,6 @@ constexpr std::array HELPER_FUNCTIONS{
"_stop", "_resolve", "_panic", "memcpy", "memmove", "memset",
};
struct Elf64_Dyn {
u64 d_tag;
u64 d_un;
};
struct Elf64_Rela {
u64 r_offset;
u64 r_info;
s64 r_addend;
};
static constexpr u32 Elf64_RelaType(const Elf64_Rela* rela) {
return static_cast<u32>(rela->r_info);
}
constexpr int DT_RELA = 7; /* Address of Rela relocs */
constexpr int DT_RELASZ = 8; /* Total size of Rela relocs */
constexpr int R_AARCH64_RELATIVE = 1027; /* Adjust by program base. */
constexpr size_t STACK_ALIGN = 16;
class JITContextImpl;
@@ -206,17 +190,17 @@ public:
if (!dyn.d_tag) {
break;
}
if (dyn.d_tag == DT_RELA) {
rela_dyn = dyn.d_un;
if (dyn.d_tag == ElfDtRela) {
rela_dyn = dyn.d_un.d_ptr;
}
if (dyn.d_tag == DT_RELASZ) {
num_rela = dyn.d_un / sizeof(Elf64_Rela);
if (dyn.d_tag == ElfDtRelasz) {
num_rela = dyn.d_un.d_val / sizeof(Elf64_Rela);
}
}
for (size_t i = 0; i < num_rela; i++) {
const auto rela{callbacks->ReadMemory<Elf64_Rela>(rela_dyn + i * sizeof(Elf64_Rela))};
if (Elf64_RelaType(&rela) != R_AARCH64_RELATIVE) {
if (Elf64RelType(rela.r_info) != ElfAArch64Relative) {
continue;
}
const VAddr contents{callbacks->MemoryRead64(rela.r_offset)};
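Reviewer note: this hunk drops the locally defined Elf64_Dyn/Elf64_Rela helpers in favor of the shared definitions from common/elf.h (ElfDtRela, ElfDtRelasz, Elf64RelType, ElfAArch64Relative) while keeping the same R_AARCH64_RELATIVE fix-up loop. A standalone sketch of that loop, under the assumptions noted in the comments:

// Standalone sketch of the relocation walk above, independent of the JIT callbacks;
// the structs are simplified (no d_un union) and, as an assumption for this sketch,
// DT_RELA/DT_RELASZ values are treated as offsets into a flat image buffer mapped
// at load_base.
#include <cstdint>
#include <cstring>
#include <vector>

struct Dyn64 { std::uint64_t d_tag; std::uint64_t d_un; };
struct Rela64 { std::uint64_t r_offset; std::uint64_t r_info; std::int64_t r_addend; };

constexpr std::uint64_t kDtRela = 7;              // location of the Rela table
constexpr std::uint64_t kDtRelasz = 8;            // total size of the Rela table in bytes
constexpr std::uint32_t kAArch64Relative = 1027;  // adjust by program base

void ApplyRelativeRelocations(std::vector<std::uint8_t>& image, std::uint64_t load_base,
                              const Dyn64* dynamic) {
    std::uint64_t rela_offset = 0;
    std::uint64_t rela_size = 0;
    // Walk the dynamic section until the DT_NULL terminator, as the diff does.
    for (const Dyn64* dyn = dynamic; dyn->d_tag != 0; ++dyn) {
        if (dyn->d_tag == kDtRela) {
            rela_offset = dyn->d_un;
        }
        if (dyn->d_tag == kDtRelasz) {
            rela_size = dyn->d_un;
        }
    }
    for (std::uint64_t i = 0; i < rela_size / sizeof(Rela64); ++i) {
        Rela64 rela{};
        std::memcpy(&rela, image.data() + rela_offset + i * sizeof(Rela64), sizeof(rela));
        if (static_cast<std::uint32_t>(rela.r_info) != kAArch64Relative) {
            continue;
        }
        // Standard semantics: store load_base + addend. The diff instead adds the
        // base to the word already present at r_offset, which is equivalent when
        // that word holds the addend.
        const std::uint64_t value = load_base + static_cast<std::uint64_t>(rela.r_addend);
        std::memcpy(image.data() + rela.r_offset, &value, sizeof(value));
    }
}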

@@ -347,7 +347,7 @@ public:
}
if (!succeeded) {
UNREACHABLE_MSG("Out of address space!");
ASSERT_MSG(false, "Out of address space!");
return Kernel::ResultOutOfMemory;
}

@@ -290,7 +290,7 @@ MiiStoreData BuildRandomStoreData(Age age, Gender gender, Race race, const Commo
u8 glasses_type{};
while (glasses_type_start < glasses_type_info.values[glasses_type]) {
if (++glasses_type >= glasses_type_info.values_count) {
UNREACHABLE();
ASSERT(false);
break;
}
}

@@ -150,9 +150,9 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
event.event->GetWritableEvent().Clear();
if (events_interface.failed[event_id]) {
{
auto lk = system.StallCPU();
auto lk = system.StallProcesses();
gpu.WaitFence(params.syncpt_id, target_value);
system.UnstallCPU();
system.UnstallProcesses();
}
std::memcpy(output.data(), &params, sizeof(params));
events_interface.failed[event_id] = false;
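Reviewer note: the nvhost_ctrl change renames the CPU-wide stall to StallProcesses()/UnstallProcesses() around the blocking WaitFence call. A hedged sketch of a hypothetical scope guard that would pair the two calls even on an early return; the diff itself simply calls them directly inside the braced block:

// Hypothetical RAII helper, not part of this diff; shown only to illustrate
// pairing the stall/unstall calls exception-safely.
#include <functional>
#include <utility>

class ScopeExit {
public:
    explicit ScopeExit(std::function<void()> fn_) : fn{std::move(fn_)} {}
    ~ScopeExit() { fn(); }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;
private:
    std::function<void()> fn;
};

// Usage sketch inside the handler:
//   auto lk = system.StallProcesses();
//   ScopeExit unstall{[&] { system.UnstallProcesses(); }};
//   gpu.WaitFence(params.syncpt_id, target_value);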

Some files were not shown because too many files have changed in this diff.