Compare commits

...

407 Commits

Author SHA1 Message Date
yuzubot
7bc63c6e27 "Merge Tagged PR 1340" 2021-02-06 13:01:56 +00:00
bunnei
61bf850f3d Merge pull request #5326 from german77/hidUpdate1
HID: Update the HID service to match more closely to switchbrew part 1
2021-02-06 02:40:11 -08:00
bunnei
1498a7c9a8 Merge pull request #5862 from bunnei/kevent
Kernel Rework: Refactor KEvent/KReadableEvent/KWritableEvent
2021-02-05 23:00:43 -08:00
bunnei
3a804752cb Merge pull request #5875 from lioncash/identifier
k_priority_queue: Minor cleanup
2021-02-05 17:02:13 -08:00
bunnei
ea4f62615e hle: kernel: Drop R_UNLESS_NOLOG in favor of expanded if-statement. 2021-02-05 14:03:36 -08:00
bunnei
546af64340 hle: kernel: KAddressArbiter: Remove noisy error log. 2021-02-05 14:03:36 -08:00
bunnei
eba3c59a61 hle: kernel: svc: Cleanup KEvent/KReadableEvent/KWritableEvent SVCs. 2021-02-05 14:03:36 -08:00
bunnei
18175c71ed common: scope_exit: Add a cancellable ScopeExit macro. 2021-02-05 14:03:36 -08:00
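A minimal sketch of a cancellable scope-exit guard like the one this commit describes (assumed shape, not yuzu's actual macro):

```cpp
#include <utility>

// Runs the stored callable at scope exit unless Cancel() was called first.
template <typename F>
class ScopeExitGuard {
public:
    explicit ScopeExitGuard(F&& func) : func{std::forward<F>(func)} {}
    ~ScopeExitGuard() {
        if (active) {
            func();
        }
    }
    void Cancel() { active = false; }

private:
    F func;
    bool active = true;
};

// Usage: auto guard = ScopeExitGuard{[&] { Rollback(); }};
//        ...; on success: guard.Cancel();
```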
bunnei
ff3c7c068b hle: kernel: Reimplement KReadableEvent and KWritableEvent. 2021-02-05 14:03:32 -08:00
bunnei
6bf80dfee0 hle: kernel: Implement KEvent. 2021-02-05 14:00:36 -08:00
bunnei
e9446d232f hle: kernel: KAddressArbiter: Use R_UNLESS_NOLOG where applicable. 2021-02-05 14:00:36 -08:00
bunnei
4577dcd5f9 common: common_funcs: Add R_UNLESS_NOLOG for scenarios that should not log. 2021-02-05 14:00:36 -08:00
bunnei
3f942c01f0 hle: kernel: Rename WritableEvent to KWritableEvent. 2021-02-05 14:00:36 -08:00
bunnei
e86a7e3691 hle: kernel: Rename ReadableEvent to KReadableEvent. 2021-02-05 14:00:36 -08:00
bunnei
b0727c90c5 Merge pull request #5867 from Morph1984/am-GetHealthWarningDisappearedSystemEvent
IApplicationFunctions: Implement GetHealthWarningDisappearedSystemEvent
2021-02-05 13:49:49 -08:00
bunnei
741dc13c5a Merge pull request #5865 from lat9nq/conditionally-quiet
video_core: host_shaders: Don't pass --quiet to glslangValidator if unavailable
2021-02-04 17:07:17 -08:00
bunnei
00cb631b2f Merge pull request #5876 from lioncash/truncation
k_affinity_mask: Avoid implicit truncation to bool
2021-02-04 14:44:11 -08:00
Lioncash
756365386a k_affinity_mask: Avoid implicit truncation to bool
This can cause compiler warnings. Instead, we can explicitly add a
boolean expression around it to naturally turn the result into a bool.
2021-02-04 15:35:46 -05:00
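A minimal illustration of the pattern described in the commit above (hypothetical names):

```cpp
#include <cstdint>

// Implicitly converting a 64-bit mask to bool triggers truncation warnings
// on some compilers; an explicit comparison produces a bool naturally.
bool IsCoreEnabled(std::uint64_t affinity_mask, int core) {
    return (affinity_mask & (std::uint64_t{1} << core)) != 0;
}
```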
Lioncash
b944edc85d k_priority_queue: Unfold several declval usages
Given these are only used as function existence checks, we can simplify
some usages of declval, given they aren't particularly useful here.

Reduces a few template instantiations, which at most reduces compile
times a tiny bit.
2021-02-04 15:18:58 -05:00
Lioncash
31e6e58101 k_priority_queue: Simplify affinity mask type alias
We can make use of the _t variants of the templates to cut down on a
little bit of verbosity.
2021-02-04 14:57:41 -05:00
Lioncash
53aec1fe2d k_priority_queue: Resolved reserved identifier
An identifier containing a starting underscore followed by a capital
letter is reserved by the standard. It's trivial to avoid this by moving
the underscore to the end of the identifier.

While the likelihood of clashing here is minimal, we can turn a
"should not break" scenario into a definitive "will not break" one, so
why not?
2021-02-04 14:55:08 -05:00
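What the reserved-identifier rule in the commit above means in practice (illustrative names only):

```cpp
// Any identifier beginning with an underscore followed by an uppercase
// letter may clash with the standard library or compiler internals:
//
//   template <typename _Member> struct Node;   // reserved, avoid
//
// A trailing underscore is never reserved, so this form is always safe:
template <typename Member_>
struct Node {
    Member_ value;
};
```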
bunnei
eb3afd30b1 Merge pull request #5874 from Morph1984/create-keys-dir
key_manager: Create the keys directory if it does not exist
2021-02-04 11:47:14 -08:00
Morph
806e2d7900 key_manager: Create the keys directory if it does not exist 2021-02-04 06:05:50 -05:00
bunnei
b331bb5210 Merge pull request #5870 from german77/hanheldfix2
config: Always update handheld config
2021-02-03 19:02:59 -08:00
german
8019b2b9b5 Add footer types and address comments 2021-02-03 20:17:08 -06:00
german
9a9e81f2e9 Fix npad struct to match switchbrew 2021-02-03 20:17:08 -06:00
german
f30ef98761 Adds missing controller types and properties 2021-02-03 20:17:08 -06:00
bunnei
cde532cc52 Merge pull request #5863 from ogniK5377/disable-reverb
audren: Disable reverb for the time being
2021-02-03 18:05:54 -08:00
german
c1b81f776c Always update handheld config 2021-02-03 17:48:04 -06:00
bunnei
b0c9752663 Merge pull request #5848 from ogniK5377/k-resourcelimit
kernel: Rewrite resource limit to be more accurate
2021-02-03 14:53:25 -08:00
Chloe Marcec
2c6e940493 Simplify limitableresource names 2021-02-03 12:55:16 +11:00
bunnei
48d040fded Merge pull request #5842 from german77/userfix
acc: Fix error when second user is selected
2021-02-02 15:43:51 -08:00
bunnei
e5a76d728f Merge pull request #5841 from german77/username
Avoid overwriting username
2021-02-02 15:04:09 -08:00
LC
82d232db46 Merge pull request #5866 from Morph1984/log-all-paths
settings: Log the cache, config and mod load directories
2021-02-02 11:07:42 -05:00
Morph
13b08376b7 IApplicationFunctions: Implement GetHealthWarningDisappearedSystemEvent 2021-02-02 10:47:38 -05:00
Morph
9137f3ec68 settings: Log the cache, config, and mod load directories 2021-02-02 04:01:05 -05:00
lat9nq
fc43eac82a video_core: host_shaders: Don't pass --quiet to glslangValidator if unavailable
Prevents CMake from calling `glslangValidator` with `--quiet` when it is
not available, i.e. on older downstream versions from Ubuntu.
2021-02-01 23:39:54 -05:00
bunnei
d4ebc9a120 Merge pull request #5861 from german77/HandheldFix
hid: Only update motion for npad and prevent over scheduling events
2021-02-01 18:46:26 -08:00
Chloe Marcec
64c3582705 Compile error 2021-02-02 13:23:34 +11:00
Chloe Marcec
9e4b2d60bc Address issues 2021-02-02 13:23:00 +11:00
LC
5e49b81d4d Merge pull request #5864 from MerryMage/is-thumb
arm_dynarmic_32: Print out CPSR.T on exception
2021-02-01 13:36:55 -05:00
MerryMage
821fc4a7b6 arm_dynarmic_32: Print out CPSR.T on exception 2021-02-01 18:35:33 +00:00
bunnei
f317b0d354 Merge pull request #5859 from Morph1984/nifm
nifm: Stub GetCurrentNetworkProfile and GetCurrentIpConfigInfo
2021-01-31 21:31:14 -08:00
Ameer J
f614d7d887 Merge pull request #5856 from Morph1984/nifm-fix-getappletinfo-stub
nifm: Fix GetAppletInfo stub
2021-01-31 23:33:11 -05:00
Chloe Marcec
9fc7f60b94 audren: Disable reverb for the time being
As this is causing issues in a few games, it's best to have it disabled until it's completely implemented
2021-02-01 14:41:58 +11:00
bunnei
67d08f14af Merge pull request #5858 from Morph1984/IsGamePlayRecordingSupported-stub
am/IApplicationFunctions: Stub IsGamePlayRecordingSupported
2021-01-31 18:04:04 -08:00
german
2489547dc5 Only update motion for npad and prevent over scheduling events 2021-01-31 18:52:38 -06:00
bunnei
1498ece290 Merge pull request #5860 from Morph1984/prepo-transmission-stub
prepo: Stub RequestImmediateTransmission and GetTransmissionStatus
2021-01-31 12:26:00 -08:00
Morph
f7dc77e03a Merge pull request #5846 from ameerj/analog-join
analog_from_button: Fix update_thread.join exception
2021-01-31 23:06:06 +08:00
Morph
8c7d89e6c7 nifm: Stub GetCurrentIpConfigInfo
- Used by Let's Sing 12
2021-01-31 07:36:37 -05:00
Morph
7d9465d47a prepo: Stub GetTransmissionStatus 2021-01-31 07:07:11 -05:00
Morph
2394807b42 prepo: Stub RequestImmediateTransmission
- Used by Animal Crossing: New Horizons
2021-01-31 07:04:35 -05:00
Morph
4bf1cf1f81 nifm: Stub GetCurrentNetworkProfile
- Used by Minecraft Bedrock Edition
- Used by Bloons TD 5
2021-01-31 06:56:29 -05:00
Morph
70a2065828 nifm: Add several structs 2021-01-31 06:56:29 -05:00
bunnei
ec51f8de82 Merge pull request #5857 from Morph1984/bsd-fix-eventfd-stub
bsd: Fix EventFd stub
2021-01-31 01:41:06 -08:00
Morph
9f6b35e61f am/IApplicationFunctions: Stub IsGamePlayRecordingSupported
Used by RetroArch
2021-01-31 03:11:03 -05:00
Morph
ded094a340 bsd: Fix EventFd stub 2021-01-31 02:57:56 -05:00
bunnei
1cc009a996 Merge pull request #5855 from Morph1984/bsd-fix-getsockopt-stub
bsd: Fix GetSockOpt stub
2021-01-30 23:21:21 -08:00
Morph
9914db8daa nifm: Fix GetAppletInfo stub 2021-01-31 02:19:36 -05:00
Morph
94f660d1cb Merge pull request #5851 from ameerj/pop-inv-stub
am: Stub TryPopFromFriendInvitationStorageChannel
2021-01-31 14:18:40 +08:00
Morph
6cc769065d bsd: Fix GetSockOpt stub 2021-01-31 01:08:56 -05:00
bunnei
360ea64738 Merge pull request #5852 from ameerj/event-fd-stub
bsd: Stub EventFd
2021-01-30 21:38:08 -08:00
ameerj
ad146eca60 bsd: Stub EventFd
Used by Family Feud
2021-01-30 21:47:32 -05:00
ameerj
99dcf7da21 am: Stub TryPopFromFriendInvitationStorageChannel
Used by Family Feud
2021-01-30 21:43:55 -05:00
Chloe Marcec
ee333e063d fix compile error 2021-01-30 21:51:22 +11:00
Chloe Marcec
56742c6222 cleanup commenting 2021-01-30 21:20:35 +11:00
Chloe Marcec
7791cfd960 Drop m_ from lock 2021-01-30 21:19:49 +11:00
Chloe Marcec
3bf62c7a8a Move to GetGlobalTimeNs, fix GetTotalPhysicalMemoryAvailable 2021-01-30 21:03:10 +11:00
Chloe Marcec
3be1a565f8 kernel: Rewrite resource limit to be more accurate
Matches closer to hardware
2021-01-30 20:40:49 +11:00
bunnei
a4526c4e1a Merge pull request #5779 from bunnei/kthread-rewrite
Rewrite KThread to be more accurate
2021-01-29 23:06:40 -08:00
bunnei
5861bacafd Merge pull request #5795 from ReinUsesLisp/bytes-to-map-end
video_core/memory_manager: Add BytesToMapEnd
2021-01-29 22:56:29 -08:00
Morph
f67cfebada Merge pull request #5838 from german77/prepostub
prepo: Stub GetSystemSessionId
2021-01-30 14:19:59 +08:00
bunnei
aaf834ffa6 Merge pull request #5847 from bunnei/update-dynarmic
externals: Dynarmic: Update to latest to include A32 ISB hook.
2021-01-29 21:52:25 -08:00
german
9150b8972e Stub GetSystemSessionId 2021-01-29 18:41:28 -06:00
bunnei
650734cc3e Merge pull request #5805 from german77/HandheldFix
Fix connect and disconnect controller events
2021-01-29 16:39:01 -08:00
bunnei
15d3376399 externals: Dynarmic: Update to latest to include A32 ISB hook.
- Fixes perf. issues with Megadimension Neptunia VII.
2021-01-29 16:36:45 -08:00
ameerj
ef8b3623f5 analog_from_button: Fix update_thread.join exception
This commit aims to address an exception that occurs when trying to join the Analog object's update_thread.

By using an atomic bool for the status of the update thread, we ensure its value is consistent across the threads accessing it.
2021-01-29 18:41:22 -05:00
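A minimal sketch of the shutdown pattern the commit above describes, assuming a polling update thread (hypothetical class shape):

```cpp
#include <atomic>
#include <chrono>
#include <thread>

// The flag is atomic so the update thread reliably observes the store made
// from the destructor and exits before join() is called.
class Analog {
public:
    Analog() : update_thread([this] { UpdateLoop(); }) {}
    ~Analog() {
        update_thread_running = false;
        if (update_thread.joinable()) {
            update_thread.join();
        }
    }

private:
    void UpdateLoop() {
        while (update_thread_running) {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }

    std::atomic<bool> update_thread_running{true};
    std::thread update_thread;
};
```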
bunnei
b786568c5a Merge pull request #5809 from ogniK5377/FlushAudioOutBuffers
audout: FlushAudioOutBuffers
2021-01-28 21:54:46 -08:00
bunnei
543e212554 hle: kernel: KLightLock: Fix several bugs. 2021-01-28 21:53:21 -08:00
bunnei
8d1afcb90f common: common_funcs: Change R_UNLESS to LOG_ERROR. 2021-01-28 21:51:16 -08:00
bunnei
c8fe8247ee arm: dynarmic: Reintroduce JIT checks on SaveContext/LoadContext. 2021-01-28 21:50:39 -08:00
bunnei
e24c6dab93 hle: kernel: KThread: Release thread resource on thread exit. 2021-01-28 21:49:47 -08:00
bunnei
10738839ad yuzu: debugger: Ignore HLE threads. 2021-01-28 21:42:27 -08:00
bunnei
3856564727 hle: kernel: process: Add state lock. 2021-01-28 21:42:26 -08:00
bunnei
ff46ef7ea3 hle: kernel: threading: Fix bug with host thread naming. 2021-01-28 21:42:26 -08:00
bunnei
6ee8340a6b hle: kernel: k_scheduler_lock: Cleanup. 2021-01-28 21:42:26 -08:00
bunnei
055194d2ab core: arm: Remove unnecessary JIT checks. 2021-01-28 21:42:26 -08:00
bunnei
091e9e8c41 common: common_funcs: Log error on R_UNLESS. 2021-01-28 21:42:26 -08:00
bunnei
6e953f7f02 hle: kernel: Allocate a dummy KThread for each host thread, and use it for scheduling. 2021-01-28 21:42:26 -08:00
bunnei
37f74d8741 hle: kernel: k_scheduler: Use atomics for current_thread, etc. 2021-01-28 21:42:26 -08:00
bunnei
f6b10fad63 hle: kernel: k_scheduler: Fix for single core mode. 2021-01-28 21:42:26 -08:00
bunnei
0a1449e04b kernel: Fix build errors. 2021-01-28 21:42:26 -08:00
bunnei
89a5ae92bd core: cpu_manager: Remove unused variable. 2021-01-28 21:42:26 -08:00
bunnei
ca78f77827 hle: kernel: KScheduler: Introduce thread context_guard. 2021-01-28 21:42:26 -08:00
bunnei
cdd14b03e5 hle: kernel: Recode implementation of KThread to be more accurate. 2021-01-28 21:42:26 -08:00
bunnei
1470338458 kernel: svc_types: Add ThreadActivity. 2021-01-28 21:42:26 -08:00
bunnei
1772ebeb1e kernel: KSchedulerPriorityQueue: Lowest priority should be LowestThreadPriority. 2021-01-28 21:42:26 -08:00
bunnei
1f99f5473c kernel: k_light_lock: Simplify EmuThreadHandle implementation. 2021-01-28 21:42:26 -08:00
bunnei
c0f5830323 hle: kernel: TimeManager: Simplify to not rely on previous EmuThreadHandle implementation. 2021-01-28 21:42:26 -08:00
bunnei
bb966d3e33 common: common_funcs: Add useful kernel macro R_SUCCEED_IF. 2021-01-28 21:42:26 -08:00
bunnei
ff186b2498 core: hle: kernel: object: Implement Finalize() virtual method. 2021-01-28 21:42:26 -08:00
bunnei
33b4930280 core: hle: kernel: svc_results: Populate with several missing error codes. 2021-01-28 21:42:26 -08:00
bunnei
5a4fc4a529 core: hle: kernel: Implement KLightLock. 2021-01-28 21:42:26 -08:00
bunnei
97129bc742 core: hle: kernel: Implement KThreadQueue. 2021-01-28 21:42:25 -08:00
bunnei
2fb77adb9f common: common_funcs: Add a few more useful macros for kernel code. 2021-01-28 21:42:25 -08:00
bunnei
4dbf3f4880 hle: kernel: KThread: Clean up thread priorities. 2021-01-28 21:42:25 -08:00
bunnei
1e55498110 hle: kernel: KThread: Reorganize thread priority defaults. 2021-01-28 21:42:25 -08:00
bunnei
0530292b97 hle: kernel: KThread: Fix ThreadType definition. 2021-01-28 21:42:25 -08:00
bunnei
4782985013 hle: kernel: Move single core "phantom mode" out of KThread.
- This is a workaround that does not belong in a kernel primitive.
2021-01-28 21:42:25 -08:00
bunnei
eea346ba8e hle: kernel: KThread: Remove thread types that do not exist. 2021-01-28 21:42:25 -08:00
bunnei
9a4e148f9e arm: arm_dynarmic: Skip calls when JIT is invalid.
- This can happen if called from an idle or suspension thread.
2021-01-28 21:42:25 -08:00
bunnei
c0d3aef28c core: hle: kernel: Rename Thread to KThread. 2021-01-28 21:42:25 -08:00
german
8ba0cac71c Fix user changing to 0 if valid 2021-01-28 21:02:25 -06:00
bunnei
df41e78205 Merge pull request #5837 from german77/socketstub
sockets: Stub GetSockOpt
2021-01-28 16:28:11 -08:00
german
7b8fa78c65 Avoid overwriting username 2021-01-28 16:58:53 -06:00
LC
c21ce728c2 Merge pull request #5831 from MerryMage/isb
dynarmic: Do not flush cache when executing an ISB instruction
2021-01-28 10:54:48 -05:00
LC
16818e952c Merge pull request #5836 from ReinUsesLisp/unaligned-constr-sched
vk_scheduler: Fix unaligned placement new expressions
2021-01-28 10:53:15 -05:00
LC
9f6290d207 Merge pull request #5840 from Morph1984/prepo-fix
prepo: Fix BufferDescriptorX invalid buffer index errors and add New variants of SaveReport
2021-01-28 10:52:44 -05:00
LC
0e125dfd43 Merge pull request #5835 from Morph1984/cleanup-sixaxis-fusion
hid: Cleanup SixAxisFusionParameters
2021-01-28 10:51:04 -05:00
Morph
4921ba05db hid: Add static_assert for Parameter size 2021-01-28 09:41:43 -05:00
Morph
ae6b3bdfbf prepo: Fix BufferDescriptorX invalid buffer errors and add "New" variants of SaveReport
The second input buffer can be optional when prepo/srepo is called; test for the availability of the second buffer prior to reading from it.
2021-01-28 01:32:24 -05:00
Morph
008afa5d59 hle_ipc: Add Can(Read, Write)Buffer
Allows us to test whether a buffer can be read from or written to memory
2021-01-28 01:32:24 -05:00
german
659b5f8088 Stub GetSockOpt 2021-01-27 23:18:20 -06:00
ReinUsesLisp
9e88ad8da9 vk_scheduler: Fix unaligned placement new expressions
We were accidentally creating an object at an unaligned memory address.
Fix this by manually aligning the offset.
2021-01-27 22:28:22 -03:00
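A sketch of the alignment fix described above (hypothetical helper name):

```cpp
#include <cstddef>

// Round offset up to T's alignment before constructing with placement new,
// so the object is never created at an unaligned address.
template <typename T>
constexpr std::size_t AlignOffsetFor(std::size_t offset) {
    constexpr std::size_t align = alignof(T);
    return (offset + align - 1) & ~(align - 1);
}

// Usage: new (buffer + AlignOffsetFor<Command>(offset)) Command(...);
```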
bunnei
45b13c3037 Merge pull request #5786 from ReinUsesLisp/glsl-cbuf
gl_shader_decompiler: Fix constant buffer size calculation
2021-01-27 15:27:53 -08:00
Rodrigo Locatti
ef6cc3aa1d vulkan_device: Blacklist Intel from float16 math (#5798)
Astral Chain crashes Intel's SPIR-V compiler when using fp16.
Disable this while the vendor works on a fix.
2021-01-27 13:31:32 -08:00
bunnei
28b822fe38 Merge pull request #5778 from ReinUsesLisp/shader-dir
renderer_opengl: Avoid precompiled cache and force NV GL cache directory
2021-01-27 11:34:21 -08:00
Morph
d4d39aa4c7 npad: Remove unused device handle parameter 2021-01-27 13:05:31 -05:00
bunnei
fb0fe3b8c3 Merge pull request #5812 from german77/StubSixaxisFusion
HID: Stub Set/Get/Reset SixaxisSensorFusionParameters
2021-01-26 15:58:04 -08:00
MerryMage
2350b76a91 externals: Update dynarmic to 0f27368f 2021-01-26 23:46:49 +00:00
bunnei
09b6f03592 Merge pull request #5810 from ogniK5377/stereo-vision
hle: Implement remaining services for Stereo Vision
2021-01-26 15:01:01 -08:00
bunnei
72c1cb85f1 Merge pull request #5824 from ogniK5377/IPsmSession
psm: IPsmSession
2021-01-26 11:11:29 -08:00
bunnei
64a5548454 Merge pull request #5774 from ogniK5377/mii-raw-random
mii: Fix BuildRandomStoreData & Cleanup raw_data
2021-01-25 16:41:20 -08:00
bunnei
81a037df9d Merge pull request #5771 from ogniK5377/lm-rework
lm: Recode LM service
2021-01-25 10:18:09 -08:00
Chloe Marcec
2c57f0fbd5 Omit system reference 2021-01-25 23:13:37 +11:00
Chloe Marcec
04e9486651 psm: IPsmSession
Used by homebrew menu
2021-01-25 21:37:51 +11:00
bunnei
2a2ee62cfd Merge pull request #5799 from ogniK5377/event-register-unregister
nvdrv: Unregister already registered events
2021-01-24 23:19:10 -08:00
bunnei
62766b1326 Merge pull request #5785 from ReinUsesLisp/buffer-dma
video_core/memory_manager: Flush destination buffer on CopyBlock
2021-01-24 22:57:00 -08:00
Rodrigo Locatti
5dc021d15b Merge pull request #5823 from ReinUsesLisp/revert-flags
Revert "Start of Integer flags implementation"
2021-01-25 03:35:48 -03:00
ReinUsesLisp
34c3ec2f8c Revert "Start of Integer flags implementation"
This reverts #4713. The implementation in that PR is not accurate.
It does not reflect the behavior seen in hardware.
2021-01-25 02:48:03 -03:00
bunnei
45e117b043 Merge pull request #5819 from ReinUsesLisp/cull-mode-cast
vk_graphics_pipeline: Fix narrowing conversion on MSVC
2021-01-24 16:58:38 -08:00
ReinUsesLisp
9dc4a80b17 vk_graphics_pipeline: Fix narrowing conversion on MSVC 2021-01-24 21:41:29 -03:00
LC
df0d8c45d2 Merge pull request #5807 from ReinUsesLisp/vc-warnings
video_core: Silence the remaining gcc warnings and enforce them
2021-01-24 17:36:43 -05:00
Rodrigo Locatti
b769b1be26 Merge pull request #5363 from ReinUsesLisp/vk-image-usage
vk_texture_cache: Support image store on sRGB images with VkImageViewUsageCreateInfo
2021-01-24 18:44:51 -03:00
bunnei
44c5ea3639 Merge pull request #5151 from comex/xx-vfs
vfs_real: When moving files or directories, don't assume file opening will succeed
2021-01-24 13:42:51 -08:00
ReinUsesLisp
6b00443bc1 vk_texture_cache: Support image store on sRGB images with VkImageViewUsageCreateInfo
Vulkan 1.0 didn't support creating sRGB image views on an ABGR8 VkImage
with storage usage bits. VK_KHR_maintenance2 addressed this allowing to
reduce the usage bits on a VkImageView.

To allow image store on non-sRGB image views when the VkImage is created
with sRGB, always create VkImages without sRGB and add the sRGB format
on the view.
2021-01-24 18:16:43 -03:00
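A sketch of the VK_KHR_maintenance2 technique the commit above describes; the handles and the chosen format are assumptions, not yuzu's actual code:

```cpp
#include <vulkan/vulkan.h>

// The image itself is created without sRGB; the sRGB format and a reduced
// usage set are applied on the view, so the storage usage bit never
// reaches the sRGB view (requires VK_KHR_maintenance2 or Vulkan 1.1).
VkImageView MakeSrgbView(VkDevice device, VkImage image) {
    const VkImageViewUsageCreateInfo usage_info{
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
        .pNext = nullptr,
        .usage = VK_IMAGE_USAGE_SAMPLED_BIT, // no STORAGE_BIT on this view
    };
    const VkImageViewCreateInfo view_info{
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .pNext = &usage_info,
        .image = image,
        .viewType = VK_IMAGE_VIEW_TYPE_2D,
        .format = VK_FORMAT_A8B8G8R8_SRGB_PACK32,
        .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
    };
    VkImageView view = VK_NULL_HANDLE;
    vkCreateImageView(device, &view_info, nullptr, &view);
    return view;
}
```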
LC
8959f3521f Merge pull request #5814 from ReinUsesLisp/remove-rdna-dynstate
vulkan_device: Lift VK_EXT_extended_dynamic_state blacklist on RDNA
2021-01-24 15:54:30 -05:00
ReinUsesLisp
6a0143400f vulkan_device: Lift VK_EXT_extended_dynamic_state blacklist on RDNA
It seems to be safe to use this on new drivers.
2021-01-24 20:21:11 -03:00
ReinUsesLisp
748551dafb cmake: Enforce -Warray-bounds and -Wmissing-field-initializers globally 2021-01-24 17:31:29 -03:00
bunnei
19c14589d3 Merge pull request #5796 from ReinUsesLisp/vertex-a-bypass-vk
vk_pipeline_cache: Properly bypass VertexA shaders
2021-01-24 11:22:58 -08:00
german
a8245cf2f1 Stub Set/Get/Reset SixaxisSensorFusionParameters 2021-01-24 11:28:52 -06:00
Chloe Marcec
2afc1060ef Print Process ID and Thread ID as hex 2021-01-25 02:47:40 +11:00
Chloe Marcec
5882cc0502 hle: Implement remaining services for Stereo Vision
Used by Zelda Breath of the Wild, Super Mario Odyssey and Nintendo Labo
2021-01-25 00:34:01 +11:00
LC
04dcada85f Merge pull request #5808 from ReinUsesLisp/glslang-quiet
host_shaders/cmake: Pass --quiet to glslang to keep it quiet
2021-01-24 05:01:10 -05:00
Chloe Marcec
b2b95e96c1 audout: FlushAudioOutBuffers
Fixes Devil May Cry
2021-01-24 19:13:34 +11:00
ReinUsesLisp
f81c783b5b host_shaders/cmake: Pass --quiet to glslang to keep it quiet
Silences noisy builds on toolchains.
2021-01-24 04:55:23 -03:00
ReinUsesLisp
cc4335a9c6 video_core/cmake: Enforce -Warray-bounds and -Wmissing-field-initializers 2021-01-24 04:42:41 -03:00
bunnei
f7ac4e1eb4 Merge pull request #5806 from bunnei/am-stub
hle: service: am: Stub ILibraryAppletAccessor::PresetLibraryAppletGpuTimeSliceZero.
2021-01-23 23:37:05 -08:00
ReinUsesLisp
1b76e7e890 video_core: Silence -Wmissing-field-initializers warnings 2021-01-24 04:32:19 -03:00
ReinUsesLisp
80a673a27f maxwell_3d: Silence array bounds warnings 2021-01-24 04:31:41 -03:00
ReinUsesLisp
ad48259d7e maxwell_to_vk: Silence -Wextra warnings about using different enum types 2021-01-24 04:03:36 -03:00
german
3b4da2d7fa Fix connect and disconnect controller events 2021-01-23 22:59:44 -06:00
comex
e9bb95ae16 vfs_real: When moving files or directories, don't assume file opening will succeed
Found this via a warning, but it's a substantive fix.

Since this is only for a cache, it should be safe to silently drop the
entry if opening fails. I think.
2021-01-23 16:19:29 -05:00
Levi Behunin
9477d23d70 shader_ir: Fix comment typo 2021-01-23 13:16:37 -05:00
LC
bfd2bcb068 Merge pull request #5800 from Morph1984/max-vibration-duration
sdl_impl: Set the maximum vibration duration to 1 second
2021-01-23 10:46:18 -05:00
Morph
5942d206c2 sdl_impl: Set the maximum vibration duration to 1 second 2021-01-23 08:06:07 -05:00
Chloe Marcec
822edff5bd Simplify condition 2021-01-23 22:12:05 +11:00
Chloe Marcec
3b0458a7a5 nvdrv: Unregister already registered events 2021-01-23 22:02:14 +11:00
LC
65f821850e Merge pull request #5797 from ReinUsesLisp/nsight-aftermath-build
nsight_aftermath_tracker: Fix build issues when enabled
2021-01-23 05:40:50 -05:00
Chloe Marcec
df42100320 Clamp string reads to buffer size 2021-01-23 18:24:57 +11:00
ReinUsesLisp
966896daad video_core/cmake: Properly generate fatal errors on Aftermath
Fix "message(ERROR ..." to "message(FATAL_ERROR ..." to properly stop
cmake when Nsight Aftermath can't be configured.
2021-01-23 04:15:30 -03:00
ReinUsesLisp
625a011888 nsight_aftermath_tracker: Fix build issues when enabled
Fixes a bunch of build errors when Nsight Aftermath is properly enabled.
2021-01-23 04:13:39 -03:00
bunnei
12355cbf02 Merge pull request #5776 from ogniK5377/lbl
lbl: Implement most of lbl
2021-01-22 23:13:23 -08:00
ReinUsesLisp
37ef2ee595 vk_pipeline_cache: Properly bypass VertexA shaders
The VertexA stage is not yet implemented, but Vulkan is adding its
descriptors, causing a discrepancy in the pushed descriptors and the
template. This generally ends up in a driver side crash.

Bypass the VertexA stage for now.
2021-01-23 03:59:59 -03:00
bunnei
302a5f00e8 Merge pull request #4713 from behunin/int-flags
Start of Integer flags implementation
2021-01-22 21:57:14 -08:00
bunnei
981d8e82d2 Merge pull request #5765 from ogniK5377/StoreSaveDataThumbnail-stub
acc: Stub StoreSaveDataThumbnail
2021-01-22 21:51:54 -08:00
ReinUsesLisp
bda177ef40 video_core/memory_manager: Add BytesToMapEnd
Track map address sizes in a flat ordered map and add a method to query
the number of bytes until the end of a map in a given address.
2021-01-22 18:31:12 -03:00
bunnei
a175ba1089 Merge pull request #5784 from v1993/patch-1
Bump conan SDL2 version to 2.0.14
2021-01-21 21:39:44 -08:00
bunnei
1e9b1d439f common: Add missing include to bit_util.h 2021-01-21 15:30:28 -08:00
ReinUsesLisp
436457b6e7 gl_shader_decompiler: Fix constant buffer size calculation
The divide logic was wrong and could cause a uniform buffer size
overflow.
2021-01-21 19:47:41 -03:00
bunnei
2c4c7aea8a Merge pull request #5781 from lioncash/bits
bit_util: Unify implementations of MostSignificantBit32/MostSignificantBit64
2021-01-21 14:45:40 -08:00
ReinUsesLisp
b7febb5625 video_core/memory_manager: Remove unused CopyBlockUnsafe
This function was not being used.
2021-01-21 19:16:06 -03:00
ReinUsesLisp
0e9a6759f9 video_core/memory_manager: Flush destination buffer on CopyBlock
When we copy into a buffer, it might contain data modified from the GPU
on the same pages. Because of this, we have to flush the contents before
writing new data.

An alternative approach would be to write the data in place, but games
can also write data in other ways, invalidating our contents.

Fixes geometry in Zombie Panic in Wonderland DX.
2021-01-21 19:16:06 -03:00
ReinUsesLisp
dd790abab0 video_core/memory_manager: Add GPU address based flush method
Allow flushing rasterizer contents based on a GPU address.
2021-01-21 19:16:05 -03:00
Valeri
46dda01151 Bump conan SDL2 version to 2.0.14
Update the conan package version used for building.

A couple of new joystick-related functions might be of interest to yuzu's input system. Some sort of LED management has been added, but it doesn't seem to support the LEDs that JoyCons/Pro Controllers use for player number indication.
2021-01-21 21:47:35 +03:00
Lioncash
6ff2db181f bit_util: Unify implementations of MostSignificantBit32/MostSignificantBit64
We can use the standardized CLZ facilities to perform this. This also
allows us to make utilizing functions constexpr and eliminate the
inclusion of an intrinsics header.
2021-01-21 04:07:58 -05:00
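A sketch of the standardized form the commit above points at, using C++20's <bit> (assumed signatures):

```cpp
#include <bit>
#include <cstdint>

// With std::countl_zero the MSB helpers can be constexpr and need no
// compiler intrinsics header. Returns -1 for an input of zero.
constexpr int MostSignificantBit32(std::uint32_t value) {
    return 31 - std::countl_zero(value);
}

constexpr int MostSignificantBit64(std::uint64_t value) {
    return 63 - std::countl_zero(value);
}
```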
bunnei
a1335d3d51 Merge pull request #5270 from german77/multiTouch
HID: Add multitouch support
2021-01-20 22:39:01 -08:00
bunnei
ffbde909c8 Merge pull request #5361 from ReinUsesLisp/vk-shader-comment
vk_shader_decompiler: Show comments as OpUndef with a type
2021-01-20 21:33:42 -08:00
bunnei
f83ef80ebd hle: service: am: Stub ILibraryAppletAccessor::PresetLibraryAppletGpuTimeSliceZero.
- Used by Monster Hunter Rise demo.
2021-01-20 20:35:12 -08:00
ReinUsesLisp
51512d01d8 renderer_opengl: Avoid precompiled cache and force NV GL cache directory
By setting __GL_SHADER_DISK_CACHE_PATH we can force the cache directory to
be in yuzu's user directory to stop commonly distributed malware from
deleting our driver shader cache. And by setting
__GL_SHADER_DISK_CACHE_SKIP_CLEANUP we can have an unbounded shader
cache size.

This has only been implemented on Windows, mostly because previous tests
didn't seem to work on Linux.

Disable the precompiled cache on Nvidia's driver. There's no need to
hide information the driver already has in its own cache.
2021-01-21 00:41:03 -03:00
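A sketch of the environment override the commit above describes, Windows-only per the commit message; the helper name is invented for illustration:

```cpp
#include <cstdlib>
#include <string>

// Force the NVIDIA GL shader cache into yuzu's own directory and disable
// the driver's cache-size cleanup via the two variables named above.
void ForceNvidiaCacheDirectory(const std::string& shader_dir) {
#ifdef _WIN32
    _putenv_s("__GL_SHADER_DISK_CACHE_PATH", shader_dir.c_str());
    _putenv_s("__GL_SHADER_DISK_CACHE_SKIP_CLEANUP", "1");
#endif
}
```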
bunnei
d98b0f8f48 Merge pull request #5743 from german77/HandheldFix
Fix player 1 turning on handheld and not updating handheld settings
2021-01-20 17:05:44 -08:00
Chloe Marcec
c795207fb2 lbl: Implement most of lbl
Pretty basic service; the only thing left to do is handle applying the setting once set:sys is implemented.
2021-01-21 00:46:03 +11:00
Chloe Marcec
5b8bc56e65 mii: Fix BuildRandomStoreData & Cleanup raw_data
Cleaned up mii raw data to reflect the underlying values instead of just a chunk of bytes.
Fixed BuildRandomStoreData not actually generating random miis properly. "values" should be a u32, not a u8.
2021-01-20 21:53:57 +11:00
Chloe Marcec
dc18a1261c Mark DestinationToString as static 2021-01-20 18:42:39 +11:00
Chloe Marcec
dca2e2c8f1 Mark LogPacketHeaderEntry hash as noexcept 2021-01-20 18:35:58 +11:00
Chloe Marcec
83f8c1a25e lm: Recode LM service
Rework the service to write to the logs instead of a separate file, as well as fix any crashes caused by lm.
2021-01-20 18:25:15 +11:00
bunnei
4cd8b2f1f7 Merge pull request #5755 from FearlessTobi/port-5344
Port citra-emu/citra#5344: "game_list: Fix folder reordering"
2021-01-19 10:53:18 -08:00
Chloe Marcec
2d33b2c55a acc: Stub StoreSaveDataThumbnail
Fixes ACA NEOGEO METAL SLUG hanging on boot.
2021-01-19 20:56:18 +11:00
Rodrigo Locatti
2ef4591e58 Merge pull request #5746 from lioncash/sign-compare
texture_cache/util: Resolve -Wsign-compare warning
2021-01-18 03:49:58 -03:00
LC
f1b58f0cd9 Merge pull request #5754 from lat9nq/fix-disable-boxcat
configure_service: Only compile FormatEventStatusString when YUZU_ENABLE_BOXCAT is enabled
2021-01-17 23:52:47 -05:00
LC
dd0679d710 Merge pull request #5757 from Morph1984/npad-handheld
npad: Add check for HANDHELD_INDEX in UpdateControllerAt()
2021-01-17 23:51:30 -05:00
Morph
4a67a5b917 npad: Add check for HANDHELD_INDEX in UpdateControllerAt() 2021-01-17 22:36:17 -05:00
german
e7c1d7bf77 Always update configuration for handheld 2021-01-17 18:40:27 -06:00
FearlessTobi
bf9f737c60 game_list: Fix folder reordering
The bug(s) happened because we swapped the contents of values.game_dirs, but the pointer each item had to its respective game_dir wasn't updated. This made it so that an item had the wrong game_dir associated with it after a "move up" or "move down" operation. It can be observed by choosing "open directory location" after such an operation.

Changed from a raw pointer to an index because it's equivalent but a bit clearer; the change is not essential.

Co-Authored-By: Vitor K <29167336+vitor-k@users.noreply.github.com>
2021-01-18 01:22:54 +01:00
lat9nq
fb796843df configure_service: Only compile FormatEventStatusString when YUZU_ENABLE_BOXCAT is enabled
The function is unused if YUZU_ENABLE_BOXCAT is disabled, causing a
-Wunused-function error when compiled.

Wrap it with `#ifdef YUZU_ENABLE_BOXCAT` to prevent compiling the
function when the option is disabled. Opting not to use
[[maybe_unused]] in case the function is totally unused in the future.
2021-01-17 17:54:29 -05:00
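A generic illustration of the guard the commit above describes, not the actual configure_service code:

```cpp
// Only compile the helper when the feature that calls it is enabled;
// builds without the flag then emit no -Wunused-function warning.
#ifdef YUZU_ENABLE_BOXCAT
static const char* FormatEventStatusString(int status) {
    return status == 0 ? "idle" : "active";
}
#endif
```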
german
0bd8cecc94 Fix player 1 default connected value 2021-01-17 12:55:28 -06:00
bunnei
e8401964b4 Merge pull request #5360 from ReinUsesLisp/enforce-memclass-access
core: Silence Wclass-memaccess warnings and enforce it
2021-01-17 00:55:10 -08:00
Rodrigo Locatti
132f2006af Merge pull request #5745 from lioncash/documentation
video_core: Resolve -Wdocumentation warnings
2021-01-17 05:37:17 -03:00
bunnei
e1ecf64701 Merge pull request #5744 from lioncash/header-guard
vulkan_debug_callback: Add missing header guard
2021-01-17 00:16:12 -08:00
Lioncash
5f4e7c77bd texture_cache/util: Resolve -Wsign-compare warning
Resolves a -Wsign-compare warning on Clang.
2021-01-17 02:47:48 -05:00
Lioncash
40acc2c079 video_core: Resolve -Wdocumentation warnings
Silences some -Wdocumentation warnings on Clang.
2021-01-17 02:44:21 -05:00
Lioncash
c61b973968 vulkan_debug_callback: Add missing header guard
Prevents inclusion issues from occurring.
2021-01-17 02:39:24 -05:00
Rodrigo Locatti
0e0fc07135 Merge pull request #5740 from lioncash/const-fn
input_interpreter: Mark two member functions as const
2021-01-16 20:02:02 -03:00
Rodrigo Locatti
fd873fd369 Merge pull request #5262 from ReinUsesLisp/buffer-base
buffer_cache/buffer_base: Add a range tracking buffer container and tests
2021-01-16 19:48:26 -03:00
Lioncash
ca9afa3293 input_interpreter: Mark two member functions as const
These aren't stateful functions, so we can make use of const.

While we're at, we can resolve some -Wdocumentation warnings.
2021-01-16 16:08:35 -05:00
bunnei
ff2b7cc0d3 Merge pull request #5366 from Morph1984/button-press
input_interpreter: Add method to check for a button press state
2021-01-16 13:00:54 -08:00
Morph
3c8f936b31 input_interpreter: Add method to check for a button press state
This allows checking for continuous input for the duration of a button press/hold
2021-01-16 10:34:39 -05:00
bunnei
a7fd61fcce Merge pull request #5275 from FernandoS27/fast-native-clock
X86/NativeClock: Improve performance of clock calculations on hot path.
2021-01-15 23:01:42 -08:00
bunnei
8def504d73 Merge pull request #5336 from lioncash/tree
common/tree: Convert defines over to templates
2021-01-15 21:46:25 -08:00
Rodrigo Locatti
c17ee0da5d Merge pull request #5297 from ReinUsesLisp/vulkan-allocator-common
vulkan_memory_allocator: Improvements to the memory allocator
2021-01-15 21:50:05 -03:00
ReinUsesLisp
c3c7603076 vk_shader_decompiler: Show comments as OpUndef with a type
Silence the new validation layer error about SPIR-V not allowing OpUndef
on an OpTypeVoid, even though the SPIR-V spec doesn't say anything against
it.

They will be inserted as an undefined int to avoid SPIRV-Cross and
validation errors, but only when a debugging tool is attached.
2021-01-15 21:12:57 -03:00
LC
8be9e5b48b Merge pull request #5358 from ReinUsesLisp/rename-insert-padding
common/common_funcs: Rename INSERT_UNION_PADDING_{BYTES,WORDS} to _NOINIT
2021-01-15 16:19:46 -05:00
ReinUsesLisp
5f517e3e16 core/cmake: Enforce Wclass-memaccess
Treat -Wclass-memaccess as an error.
2021-01-15 16:31:19 -03:00
ReinUsesLisp
f8650a9580 core: Silence Wclass-memaccess warnings
This requires making several types trivial and properly initializing
them whenever they are constructed.
2021-01-15 16:31:19 -03:00
ReinUsesLisp
3ff978aa4f common/common_funcs: Rename INSERT_UNION_PADDING_{BYTES,WORDS} to _NOINIT
INSERT_PADDING_BYTES_NOINIT is more descriptive of the underlying behavior.
2021-01-15 16:27:28 -03:00
ReinUsesLisp
301e2b5b7a vulkan_memory_allocator: Remove unnecessary 'device' memory from commits 2021-01-15 16:19:40 -03:00
ReinUsesLisp
432f045dba vk_texture_cache: Use Download memory types for texture flushes
Use the Download memory type where it matters.
2021-01-15 16:19:40 -03:00
ReinUsesLisp
8f22f5470c vulkan_memory_allocator: Add allocation support for download types
Implements the allocator logic to handle download memory types. This
will try to use HOST_CACHED_BIT when available.
2021-01-15 16:19:39 -03:00
ReinUsesLisp
72541af3bc vulkan_memory_allocator: Add "download" memory usage hint
Allow users of the allocator to hint memory usage for downloads. This
removes the non-descriptive boolean passed for "host visible" or not
host visible memory commits, and uses an enum to hint device local,
upload and download usages.
2021-01-15 16:19:39 -03:00
ReinUsesLisp
fade63b58e vulkan_common: Move allocator to the common directory
Allow using the abstraction from the OpenGL backend.
2021-01-15 16:19:39 -03:00
ReinUsesLisp
c2b550987b renderer_vulkan: Rename Vulkan memory manager to memory allocator
"Memory manager" collides with the guest GPU memory manager, and a
memory allocator sounds closer to what the abstraction aims to be.
2021-01-15 16:19:39 -03:00
ReinUsesLisp
e996f1ad09 vk_memory_manager: Improve memory manager and its API
Fix a bug where the memory allocator could leave gaps between commits.
To fix this the allocation algorithm was reworked, although it's still
short in number of lines of code.

Rework the allocation API to self-contained movable objects instead of
naively using a unique_ptr to do the job for us. Remove the VK prefix.
2021-01-15 16:19:36 -03:00
bunnei
f728a504aa Merge pull request #5355 from lioncash/timer
common/timer: Remove
2021-01-15 09:42:33 -08:00
german
b483f2d010 Always initialize keyboard input 2021-01-15 09:05:17 -06:00
german
8495e1bd83 Add multitouch support for touch screens 2021-01-15 09:05:17 -06:00
german
d8df9a16bd Allow to return up to 16 touch inputs per engine 2021-01-15 09:05:17 -06:00
german
390ee10eef Allow all touch inputs at the same time and remove config options that are no longer necessary 2021-01-15 09:05:17 -06:00
german
d583e01f54 Add multitouch support 2021-01-15 09:03:39 -06:00
LC
9754a8145c Merge pull request #5357 from ReinUsesLisp/alignment-log2
common/alignment: Rename AlignBits to AlignUpLog2 and use constraints
2021-01-15 03:12:36 -05:00
Rodrigo Locatti
5b9aedfc21 Merge pull request #5356 from lioncash/clz
common/bit_util: Replace CLZ/CTZ operations with standardized ones
2021-01-15 04:48:58 -03:00
Lioncash
8620de6b20 common/bit_util: Replace CLZ/CTZ operations with standardized ones
Makes for less code that we need to maintain.
2021-01-15 02:15:32 -05:00
ReinUsesLisp
89c15dd115 common/alignment: Upgrade to use constraints instead of static asserts 2021-01-15 04:13:39 -03:00
ReinUsesLisp
fe494a0ccd common/alignment: Rename AlignBits to AlignUpLog2
AlignUpLog2 describes what the function does better than AlignBits.
2021-01-15 04:13:33 -03:00
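A sketch of the operation the new name describes; the parameter is the log2 of the alignment, not the alignment itself (not the exact yuzu signature):

```cpp
#include <cstddef>

// AlignUpLog2(value, n) rounds value up to the next multiple of 2^n.
constexpr std::size_t AlignUpLog2(std::size_t value, std::size_t align_log2) {
    return (value + (std::size_t{1} << align_log2) - 1) >> align_log2 << align_log2;
}

static_assert(AlignUpLog2(13, 4) == 16);
static_assert(AlignUpLog2(32, 4) == 32);
```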
Lioncash
91084d9396 common/timer: Remove
This is a leftover from citra and dolphin that isn't used at all,
particularly given the <chrono> header exists.
2021-01-15 01:55:33 -05:00
LC
c8bf0caca0 Merge pull request #5354 from ReinUsesLisp/remove-common-color
common/color: Remove
2021-01-15 01:54:22 -05:00
LC
6676687694 Merge pull request #5352 from ReinUsesLisp/remove-tester
cmake: Remove yuzu_tester
2021-01-15 01:48:02 -05:00
ReinUsesLisp
95fa57f007 common/color: Remove
This is a leftover from Citra we no longer use.
2021-01-15 03:47:43 -03:00
LC
7f37822c74 Merge pull request #5353 from ReinUsesLisp/deduplicate-warning-flags
{video_,}core/cmake: Remove Werror flags already defined code-base wide
2021-01-15 01:45:01 -05:00
ReinUsesLisp
fb99446f24 core/cmake: Remove Werror flags already defined code-base wide 2021-01-15 03:39:24 -03:00
ReinUsesLisp
cc2c3e447f video_core/cmake: Remove Werror flags already defined code-base wide
These flags are already defined in src/cmake.
2021-01-15 03:37:34 -03:00
LC
28e78d81b2 Merge pull request #5351 from ReinUsesLisp/vc-unused-functions
cmake: Enforce -Wunused-function code-base wise
2021-01-15 01:36:51 -05:00
Rodrigo Locatti
185388f341 Merge pull request #5350 from ReinUsesLisp/vk-init-warns
vulkan_common: Silence missing initializer warnings
2021-01-15 03:32:01 -03:00
LC
76b465f3ef Merge pull request #5349 from ReinUsesLisp/anv-fix
vulkan_device: Enable shaderStorageImageMultisample conditionally
2021-01-15 01:17:00 -05:00
ReinUsesLisp
af540b0057 cmake: Remove yuzu_tester
We never ended up using yuzu_tester.
Removing it saves code duplication with yuzu_cmd, and distribution size on
prebuilt packages.

For unit testing, we can use catch2 from guest code and dump the results
to a file. Then execute yuzu from a script on ci if we want this to be
automated.
2021-01-15 03:14:44 -03:00
ReinUsesLisp
06e0506cb3 cmake: Enforce -Wunused-function code-base wide 2021-01-15 03:09:48 -03:00
ReinUsesLisp
71264ce9a7 video_core: Enforce -Wunused-function
Stops us from merging code with unused functions in the future.

If something is invoked behind conditionally evaluated code in
a way that the language can't see it (e.g. preprocessor macros), the
potentially unused function should use [[maybe_unused]].
2021-01-15 02:59:25 -03:00
LC
6dc1d48fd1 Merge pull request #5348 from ReinUsesLisp/astc-robustness
astc: Make the decoder more robust to invalid data
2021-01-15 00:59:10 -05:00
ReinUsesLisp
3e03391a49 vk_buffer_cache: Remove unused function 2021-01-15 02:58:55 -03:00
ReinUsesLisp
be8fd5490e vulkan_common: Silence missing initializer warnings
Silence warnings explicitly initializing all members on construction.
2021-01-15 02:55:11 -03:00
ReinUsesLisp
ba2ea7eeac vulkan_device: Enable shaderStorageImageMultisample conditionally
Fix Vulkan initialization on ANV.
2021-01-15 02:47:05 -03:00
ReinUsesLisp
22be115eb2 astc: Increase integer encoded vector size
Invalid ASTC textures seem to write more bytes here, increase
the size to something that can't make us push out of bounds.
2021-01-15 02:24:36 -03:00
ReinUsesLisp
0ec71b78fb astc: Return zero on out of bound bits
Avoid out of bound reads on invalid ASTC textures.
Games can bind invalid textures that make us read or write out of bounds.
2021-01-15 02:24:36 -03:00
LC
93f7719eed Merge pull request #5302 from lat9nq/appimage-update
ci/linux: Make Mainline AppImages updateable
2021-01-14 18:46:27 -05:00
bunnei
4038ca2e5d Merge pull request #5345 from lioncash/unused-var
yuzu: Remove unused variables in Qt code
2021-01-14 01:23:50 -08:00
Lioncash
e11e1dcf2d yuzu: Remove unused variables in Qt code
Removes two unused variables in our Qt code. In this case the removal of
these two results in fewer allocations, given std::map allocates on the
heap.
2021-01-14 03:05:41 -05:00
Morph
f1e278c30f Merge pull request #5343 from lioncash/qt6
configure_motion_touch: Migrate off QRegExp to QRegularExpression
2021-01-14 15:30:26 +08:00
Morph
980973d83e Merge pull request #5344 from lioncash/move
configure_motion_touch: Prevent use after move in ApplyConfiguration()
2021-01-14 15:24:03 +08:00
Lioncash
45aee996c1 configure_motion_touch: Prevent use after move in ApplyConfiguration()
touch_engine was being compared against after being moved into the
setter for the engine, so this comparison wouldn't behave properly.
2021-01-13 22:37:40 -05:00
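A minimal illustration of the use-after-move bug class the commit above fixes (hypothetical setter):

```cpp
#include <string>
#include <utility>

void SetEngine(std::string engine) {} // hypothetical setter stub

// Read the value *before* moving it into the setter; comparing
// touch_engine after the move would inspect a moved-from string.
bool ApplyConfiguration(std::string touch_engine) {
    const bool is_emulated = touch_engine == "emulated";
    SetEngine(std::move(touch_engine));
    return is_emulated;
}
```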
Lioncash
a2952ac213 configure_motion_touch: Migrate off QRegExp to QRegularExpression
QRegularExpression was introduced in Qt 5 as a better replacement for
QRegExp. In Qt 6.0 QRegExp is removed entirely.

To remain forward compatible with Qt 6.0, we can transition over to
using QRegularExpression.
2021-01-13 22:25:52 -05:00
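A sketch of the migration the commit above describes; the IP-style pattern is an example only:

```cpp
#include <QRegularExpression>
#include <QRegularExpressionValidator>

// QRegularExpressionValidator (available since Qt 5) replaces
// QRegExpValidator, which is removed entirely in Qt 6.
QValidator* MakeAddressValidator(QObject* parent) {
    return new QRegularExpressionValidator(
        QRegularExpression(QStringLiteral("^\\d{1,3}(\\.\\d{1,3}){3}$")), parent);
}
```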
LC
5e35c69f35 Merge pull request #5330 from german77/regexerror
Fix IP validator error
2021-01-13 22:08:42 -05:00
bunnei
2c2ef9252f Merge pull request #5342 from lioncash/qt6
yuzu: Migrate off of setMargin() to setContentsMargins()
2021-01-13 10:44:21 -08:00
german
06cf705501 Fix IP validator error where the last octet produced an error if the value was higher than 199 2021-01-13 11:02:28 -06:00
Lioncash
0d7de7c2db yuzu: Migrate off of setMargin() to setContentsMargins()
setMargin() has been deprecated since Qt 5, and replaced with
setContentsMargins(). We can move over to setContentsMargins() to stay
forward-compatible with Qt 6.0.
2021-01-13 07:29:59 -05:00
Morph
baff865d7c Merge pull request #5341 from ReinUsesLisp/anv-storage
vulkan_device: Remove requirement on shaderStorageImageMultisample
2021-01-13 18:00:52 +08:00
ReinUsesLisp
d9a15a935b vulkan_device: Remove requirement on shaderStorageImageMultisample
yuzu doesn't currently emulate MS image stores. Requiring this makes no
sense for now. Fixes ANV not booting any games on Vulkan.
2021-01-13 06:21:33 -03:00
ReinUsesLisp
7bd603061c tests: Add unit tests for the GPU range tracking buffer container
Due to how error prone the container design is, this commit adds unit
tests for it.

Some tests taken from here are based on bugs from using this buffer
container in games, so if we ever break it in the future in a way that
might harm games, the tests should fail.
2021-01-13 04:31:40 -03:00
LC
c320da3f63 Merge pull request #5340 from Morph1984/gcc-warnings
cmake: Enforce -Werror=switch and -Werror=unused-variable
2021-01-13 02:30:31 -05:00
ReinUsesLisp
a4bfae1b55 buffer_cache/buffer_base: Add a range tracking buffer container
It keeps track of the modified CPU and GPU ranges on a CPU page
granularity, notifying the given rasterizer about state changes
in the tracking behavior of the buffer.

Use a small vector optimization to store buffers smaller than 256 KiB
locally instead of using free store memory allocations.
2021-01-13 04:14:58 -03:00
Morph
2b98da2ed4 cmake: Enforce -Werror=switch and -Werror=unused-variable 2021-01-13 01:57:18 -05:00
bunnei
0fb19e9bef Merge pull request #5280 from FearlessTobi/port-5666
Port citra-emu/citra#5666: "Rotate previous log file to "citra_log.txt.old""
2021-01-12 22:16:57 -08:00
bunnei
de1a316369 Merge pull request #5311 from ReinUsesLisp/fence-wait
vk_fence_manager: Use timeline semaphores instead of spin waits
2021-01-12 21:00:05 -08:00
Lioncash
b15e1a3501 common/tree: Convert defines over to templates
Reworks the tree header to operate off of templates as opposed to a
series of defines.

This allows all tree facilities to obey namespacing rules, and also
allows this code to be used within modules once compiler support is in
place.

This also gets rid of the need to use a macro to define functions and
structs for the necessary data types. With templates, these will be
generated when they're actually used, eliminating the need for the
separate declarations.
2021-01-12 16:46:36 -05:00
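A before/after sketch of the macro-to-template conversion the commit above describes (assumed shapes, names for illustration):

```cpp
// Before: a BSD-style macro stamps out link structs per node type and
// cannot live inside a namespace:
//
//   #define RB_ENTRY(type) struct { type *left, *right, *parent; }
//
// After: an equivalent template, generated only when instantiated:
namespace Common {
template <typename T>
struct RBEntry {
    T* left{};
    T* right{};
    T* parent{};
};
} // namespace Common
```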
Lioncash
197b5d19bc common/tree: Remove unused splay tree defines
Makes for less code to take care of.
2021-01-12 02:32:41 -05:00
bunnei
99d2d77062 Merge pull request #5333 from lioncash/define
common/parent_of_member: Replace TYPED_STORAGE define with template alias
2021-01-11 20:47:30 -08:00
Lioncash
703c57a119 common/parent_of_member: Replace TYPED_STORAGE define with template alias
Provides the same construct, but makes it obey namespacing.
2021-01-11 18:26:04 -05:00
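A sketch of the replacement the commit above describes (assumed shape, not the exact yuzu definition):

```cpp
#include <type_traits>

// A template alias provides the same construct as a TYPED_STORAGE(T)
// macro while obeying namespacing rules.
namespace Common {
template <typename T>
using TypedStorage = std::aligned_storage_t<sizeof(T), alignof(T)>;
} // namespace Common
```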
bunnei
eb3cb54aa5 Merge pull request #5266 from bunnei/kernel-synch
Rewrite KSynchronizationObject, KConditionVariable, and KAddressArbiter
2021-01-11 14:36:26 -08:00
bunnei
03dfc8d8e7 hle: kernel: thread: Preserve thread wait reason for debugging only.
- This is decoupled from core functionality and used for debugging only.
2021-01-11 14:23:17 -08:00
bunnei
81c1bfafea yuzu: debugger: wait_tree: Handle unknown ThreadState. 2021-01-11 14:23:16 -08:00
bunnei
6b2f653143 hle: kernel: k_scheduler_lock: Fix shadowing errors. 2021-01-11 14:23:16 -08:00
bunnei
354130cd84 core: arm: arm_interface: Fix shadowing errors. 2021-01-11 14:23:16 -08:00
bunnei
82f6037ec2 core: hle: Add missing calls to MicroProfileOnThreadExit. 2021-01-11 14:23:16 -08:00
bunnei
912dd50146 core: hle: Integrate new KConditionVariable and KAddressArbiter implementations. 2021-01-11 14:23:16 -08:00
bunnei
952d1ac487 core: hle: kernel: Update KAddressArbiter. 2021-01-11 14:23:16 -08:00
bunnei
b4e6d6c385 core: hle: kernel: Update KConditionVariable. 2021-01-11 14:23:16 -08:00
bunnei
1212fa60b6 core: hle: kernel: Begin moving common SVC definitions to their own header. 2021-01-11 14:23:16 -08:00
bunnei
8a155c4058 hle: kernel: Remove unnecessary AddressArbiter definition. 2021-01-11 14:23:16 -08:00
bunnei
92d5c63f01 common: common_funcs: Add R_UNLESS macro. 2021-01-11 14:23:16 -08:00
bunnei
f12701b303 hle: kernel: k_scheduler: Cleanup OnThreadPriorityChanged. 2021-01-11 14:23:16 -08:00
bunnei
d1309fb275 hle: kernel: Rename thread "status" to "state". 2021-01-11 14:23:16 -08:00
bunnei
c3c43e32fc hle: kernel: thread: Replace ThreadStatus/ThreadSchedStatus with a single ThreadState.
- This is how the real kernel works, and is more accurate and simpler.
2021-01-11 14:23:16 -08:00
bunnei
7420a717e6 core: hle: kernel: Add some useful functions for checking kernel addresses. 2021-01-11 14:23:16 -08:00
bunnei
4bbf173fc1 core: hle: kernel: svc_types: Add type definitions for KAddressArbiter. 2021-01-11 14:23:16 -08:00
bunnei
fb43b8efd2 common: Introduce useful tree structures. 2021-01-11 14:23:16 -08:00
bunnei
35c3c078e3 core: hle: kernel: Update KSynchronizationObject. 2021-01-11 14:23:16 -08:00
bunnei
1ae883435d core: hle: kernel: Begin moving common SVC results to their own header. 2021-01-11 14:23:16 -08:00
bunnei
8fc6e92ef1 hle: service: nfp: Remove incorrect signaling behavior in GetDeviceState. 2021-01-11 14:23:16 -08:00
Levi
7a3c884e39 Merge remote-tracking branch 'upstream/master' into int-flags 2021-01-10 22:09:56 -07:00
bunnei
46cd71d1c7 Merge pull request #5229 from Morph1984/fullscreen-opt
yuzu/main: Add basic command line arguments
2021-01-10 18:53:04 -08:00
LC
5e161b2531 Merge pull request #5324 from Morph1984/docked-default
config: Enable docked mode by default
2021-01-10 20:51:33 -05:00
bunnei
32df83e55d Merge pull request #5312 from german77/overclockenabled
apm: Stub IsCpuOverclockEnabled
2021-01-10 14:30:13 -08:00
Morph
05f58144c9 config: Enable docked mode by default 2021-01-10 09:37:38 -05:00
bunnei
fe9588f4a0 Merge pull request #5323 from Morph1984/enforce-c4101
cmake: Enforce C4101
2021-01-09 22:49:58 -08:00
Morph
25724898d0 cmake: Enforce C4101
This matches GCC's -Wunused-variable
2021-01-10 01:16:25 -05:00
Morph
e07540264d yuzu_cmd: Silence unreferenced local variable warning 2021-01-10 01:10:36 -05:00
LC
0f932d30f5 Merge pull request #5320 from ReinUsesLisp/div-ceil-type
common/div_ceil: Return numerator type
2021-01-09 16:45:29 -05:00
LC
64a24f3344 Merge pull request #5322 from Morph1984/resolve-c4062-msvc
general: Resolve C4062 warnings on MSVC
2021-01-09 16:43:47 -05:00
Morph
4aae21e1e4 general: Resolve C4062 warnings on MSVC 2021-01-09 14:46:35 -05:00
ReinUsesLisp
c190586597 common/div_ceil: Return numerator type
Fixes instances where DivCeil(u32, u64) would surprisingly return u64,
instead of the more natural u32.
2021-01-09 03:16:10 -03:00
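A sketch of the fix the commit above describes (assumed signature):

```cpp
#include <cstdint>
#include <type_traits>

// Returning the numerator's type keeps DivCeil(u32, u64) at u32 instead
// of promoting the result to the wider denominator type.
template <typename N, typename D>
constexpr N DivCeil(N number, D divisor) {
    return static_cast<N>((static_cast<D>(number) + divisor - 1) / divisor);
}

static_assert(std::is_same_v<decltype(DivCeil(std::uint32_t{10}, std::uint64_t{4})),
                             std::uint32_t>);
static_assert(DivCeil(std::uint32_t{10}, std::uint64_t{4}) == 3);
```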
Rodrigo Locatti
7bad1974a6 Merge pull request #5319 from ReinUsesLisp/msvc-warnings
cmake: Enforce C4062, C4265, C4388, and C5038
2021-01-09 03:13:25 -03:00
ReinUsesLisp
d7128845c9 cmake: Enforce C4062, C4265, C4388, and C5038
This should match some warnings we treat as errors on gcc and clang,
catching bugs early and reducing the number of instances where we have to
edit commits to make CI happy when developing from Windows.
2021-01-09 02:19:17 -03:00
ReinUsesLisp
c68d0dc851 file_sys/registered_cache: Silence virtual functions without override warnings 2021-01-09 00:04:12 -03:00
ReinUsesLisp
b4451c5e81 core: Silence unhandled enum in switch warnings 2021-01-08 23:21:07 -03:00
ReinUsesLisp
613b3671b7 tests/ring_buffer: Silence signed/unsigned mismatch warnings 2021-01-08 23:14:38 -03:00
bunnei
8eea7c1176 Merge pull request #5231 from ReinUsesLisp/dyn-bindings
renderer_vulkan/fixed_pipeline_state: Move enabled bindings to static state
2021-01-08 12:24:46 -08:00
german
385a4555d5 Stub IsCpuOverclockEnabled 2021-01-08 09:44:56 -06:00
bunnei
61f707d708 Merge pull request #5300 from JeremyStarTM/patch-1
Removed MacOS build link
2021-01-08 00:02:12 -08:00
ReinUsesLisp
154a7653f9 vk_fence_manager: Use timeline semaphores instead of spin waits
With timeline semaphores we can avoid creating objects. Instead of
creating an event, grab the current tick from the scheduler and flush
the current command buffer. When the fence has to be queried/waited, we
can do so against the master semaphore instead of spinning on an event.

If Vulkan supported NVN-like events or fences, we could signal from the
command buffer and wait for that without splitting things in two
separate command buffers.
2021-01-08 02:47:28 -03:00
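A sketch of waiting on a timeline semaphore tick instead of spinning on an event, as the commit above describes; the handles and tick source are assumptions (Vulkan 1.2 core, VK_KHR_timeline_semaphore on older drivers):

```cpp
#include <cstdint>
#include <vulkan/vulkan.h>

// Block until the timeline semaphore's counter reaches `tick`.
bool WaitForTick(VkDevice device, VkSemaphore timeline, std::uint64_t tick) {
    const VkSemaphoreWaitInfo wait_info{
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
        .semaphoreCount = 1,
        .pSemaphores = &timeline,
        .pValues = &tick,
    };
    return vkWaitSemaphores(device, &wait_info, UINT64_MAX) == VK_SUCCESS;
}
```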
bunnei
c72571055b Merge pull request #5310 from lat9nq/fix-disable-web-service
CMakeLists: Disable YUZU_ENABLE_BOXCAT if ENABLE_WEB_SERVICE is disabled
2021-01-07 17:10:34 -08:00
lat9nq
78be397723 CMakeLists: Disable YUZU_ENABLE_BOXCAT if ENABLE_WEB_SERVICE is disabled
Boxcat is a web service but is still enabled if ENABLE_WEB_SERVICE is
disabled during the CMake stage, which causes compilation issues with
either missing headers or missing libraries.

This disables YUZU_ENABLE_BOXCAT regardless of the input if
ENABLE_WEB_SERVICE is disabled.
2021-01-07 17:28:15 -05:00
bunnei
aaf9e39f56 Merge pull request #5237 from ameerj/nvdec-syncpt
nvdec: Incorporate syncpoint manager
2021-01-07 12:42:28 -08:00
Ameer J
16392a23cc remove inaccurate reference
Co-authored-by: LC <mathew1800@gmail.com>
2021-01-07 14:33:45 -05:00
ameerj
06cef3355e fix for nvdec disabled, cleanup host1x 2021-01-07 14:33:45 -05:00
ameerj
2c27127d04 nvdec syncpt incorporation
Lays the groundwork for async GPU, although this does not fully implement async NVDEC operations.
2021-01-07 14:33:45 -05:00
Morph
bcb702fa3e Merge pull request #5306 from MerryMage/ignore-library-Open
vulkan_library: Common::DynamicLibrary::Open is [[nodiscard]]
2021-01-08 01:44:18 +08:00
MerryMage
21199cb965 vulkan_library: Common::DynamicLibrary::Open is [[nodiscard]]
Ignore the return value on __APPLE__ systems as well
2021-01-07 17:37:47 +00:00
Morph
123568ef80 Merge pull request #5305 from MerryMage/page_shift
texture_cache: Replace PAGE_SHIFT with PAGE_BITS
2021-01-08 00:55:34 +08:00
MerryMage
aace20afc7 texture_cache: Replace PAGE_SHIFT with PAGE_BITS
PAGE_SHIFT is a #define in system headers that leaks into user code on some systems
2021-01-07 16:51:34 +00:00
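An illustration of the name-collision hazard the commit above sidesteps:

```cpp
// Some system headers leak a macro like
//   #define PAGE_SHIFT 12
// into user code, silently rewriting any identifier with that name.
// Renaming the constant avoids the collision entirely.
constexpr unsigned PAGE_BITS = 12; // was PAGE_SHIFT
```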
lat9nq
0d24b1a31b ci/linux: Make Mainline AppImages updateable
Moves the final step for building the AppImage to the upload script.
Instructs appimagetool to embed update information into the AppImage if
the release target is Mainline. Also tells it to create a zsync file to
enable partial-downloads when updating the AppImage.

Also renames the AppImage from `yuzu-{version info}-x86_64.AppImage` to
`yuzu-{version info}.AppImage` to avoid a bug in the downloads page at
yuzu-emu.org/downloads.
2021-01-06 13:23:56 -05:00
JeremyStarTM
5b60899fbc Removed MacOS build link
The macOS build link was removed from the README.md because it no longer exists.
2021-01-06 11:39:27 +01:00
Morph
e8d40559d5 Merge pull request #5288 from ReinUsesLisp/workaround-garbage
gl_texture_cache: Avoid format views on Intel and AMD
2021-01-06 15:39:51 +08:00
bunnei
e112d0a52f Merge pull request #5250 from lat9nq/appimage
ci/linux: Build an AppImage
2021-01-05 21:34:08 -08:00
bunnei
dc02b03c4a Merge pull request #5293 from ReinUsesLisp/return-values
core: Enforce C4715 (not all control paths return a value)
2021-01-05 19:04:15 -08:00
bunnei
275b96a0e2 Merge pull request #5289 from ReinUsesLisp/vulkan-device
vulkan_common: Move device abstraction to the common directory and allow surfaceless devices
2021-01-05 17:44:56 -08:00
ReinUsesLisp
43d9f417ae core: Enforce C4715 (not all control paths return a value) 2021-01-05 04:18:40 -03:00
ReinUsesLisp
4f13e270c8 core: Silence warnings when compiling without asserts 2021-01-05 04:18:16 -03:00
LC
2a6e6306d8 Merge pull request #5292 from ReinUsesLisp/empty-set
vk_rasterizer: Skip binding empty descriptor sets on compute
2021-01-04 21:32:57 -05:00
bunnei
4e6aa1cfdd Merge pull request #5261 from gal20/hide_mouse_patch
yuzu/main: Fix 'Hide mouse on inactivity' and port citra-emu/citra#5476
2021-01-04 17:19:04 -08:00
ReinUsesLisp
1ccf805367 vk_rasterizer: Skip binding empty descriptor sets on compute
Fixes unit tests where compute shaders had no descriptors in the set,
making Vulkan drivers crash when binding an empty set.
2021-01-04 17:56:39 -03:00
Morph
ace8a8e86e Merge pull request #5284 from ameerj/bufferq-oor-fix
buffer_queue: Fix data race by protecting queue_sequence access
2021-01-04 15:42:40 +08:00
ameerj
6b354ccaee buffer_queue: Protect queue_sequence list access with a mutex
Fixes a data race, as this was an unprotected variable manipulated by multiple threads.
2021-01-04 01:36:41 -05:00
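A sketch of the locking fix the commit above describes (assumed names and shape):

```cpp
#include <cstdint>
#include <list>
#include <mutex>

// Guard every access to the shared list with one mutex so concurrent
// producers and consumers don't race.
class BufferQueue {
public:
    void PushSequence(std::uint32_t sequence) {
        std::scoped_lock lock{queue_sequence_mutex};
        queue_sequence.push_back(sequence);
    }

    bool PopSequence(std::uint32_t& out) {
        std::scoped_lock lock{queue_sequence_mutex};
        if (queue_sequence.empty()) {
            return false;
        }
        out = queue_sequence.front();
        queue_sequence.pop_front();
        return true;
    }

private:
    std::mutex queue_sequence_mutex;
    std::list<std::uint32_t> queue_sequence;
};
```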
ReinUsesLisp
ac1e4734c2 vulkan_device: Allow creating a device without surface 2021-01-04 02:22:22 -03:00
ReinUsesLisp
d235cf3933 renderer_vulkan/nsight_aftermath_tracker: Move to vulkan_common 2021-01-04 02:22:22 -03:00
ReinUsesLisp
3753553b6a renderer_vulkan: Move device abstraction to vulkan_common 2021-01-04 02:22:22 -03:00
Rodrigo Locatti
4801f4250d Merge pull request #5286 from ReinUsesLisp/rename-vk-device
renderer_vulkan: Rename VKDevice to Device
2021-01-04 02:22:02 -03:00
ReinUsesLisp
7d904fef2e gl_texture_cache: Avoid format views on Intel and AMD
Intel and AMD proprietary drivers are incapable of rendering to texture
views of different formats than the original texture. Avoid creating
these at a cache level. This will consume more memory, emulating them
with copies.
2021-01-04 02:06:40 -03:00
ReinUsesLisp
3a49c1a691 gl_texture_cache: Create base images with sRGB
This breaks accelerated decoders trying to imageStore into images with
sRGB. The decoders are currently disabled so this won't cause issues at
runtime.
2021-01-04 01:54:54 -03:00
FearlessTobi
beb951770a Address review comments 2021-01-04 04:36:50 +01:00
xperia64
fd5776aac2 Delete the old log file before rotating (#5675) 2021-01-04 04:33:34 +01:00
Rodrigo Locatti
87a8925523 Merge pull request #5285 from lioncash/error-str
main: Resolve error string not displaying
2021-01-03 19:56:15 -03:00
ReinUsesLisp
974d731926 renderer_vulkan: Rename VKDevice to Device
The "VK" prefix predates the "Vulkan" namespace. It was carried around
the codebase for consistency. "VKDevice" is currently a bad alias of
"VkDevice" (differing only in the case of one character) that can cause
confusion. Rename all instances of it.
2021-01-03 17:51:48 -03:00
Rodrigo Locatti
7265e80c12 Merge pull request #5230 from ReinUsesLisp/vulkan-common
vulkan_common: Move reusable Vulkan abstractions to a separate directory
2021-01-03 17:38:29 -03:00
Lioncash
86592b274e main: Resolve error string not displaying
During the transition to make the error dialog translatable, I
accidentally got rid of the conversion to ResultStatus, which prevented
operator<< from being invoked during formatting.

This adds a function to directly retrieve the result status string
instead so that it displays again.
2021-01-03 13:18:04 -05:00
bunnei
71e18dddbe Merge pull request #5278 from MerryMage/cpuopt_unsafe_inaccurate_nan
dynarmic: Add Unsafe_InaccurateNaN optimization
2021-01-03 03:27:29 -08:00
bunnei
f64456c7e2 Merge pull request #5279 from bunnei/buffer-queue-connect
hle: service: nvflinger: buffer_queue: Do not reset id/layer_id on Connect.
2021-01-03 01:01:38 -08:00
Morph
ec58aabb26 Merge pull request #5281 from FearlessTobi/port-5668
Port citra-emu/citra#5668: "Update zstd to v1.4.8"
2021-01-03 12:25:21 +08:00
FearlessTobi
c90268127b Update zstd to v1.4.8
Co-Authored-By: Vitor K <29167336+vitor-k@users.noreply.github.com>
2021-01-03 01:58:14 +01:00
bunnei
bf8bd60ab3 Fix the old log file to work with the log parser. 2021-01-03 01:44:52 +01:00
xperia64
f478a57737 Rotate previous log file to '.old' if it exists 2021-01-03 01:44:42 +01:00
bunnei
235b5d27ae Merge pull request #5267 from lioncash/localize
main: Make the loader error dialog fully translatable
2021-01-02 15:44:32 -08:00
bunnei
beaa25d777 hle: service: nvflinger: buffer_queue: Do not reset id/layer_id on Connect.
- This behavior was a mistake; fixes Katana Zero.
2021-01-02 15:42:16 -08:00
MerryMage
8a5356357f externals: Update dynarmic to 3806284cb 2021-01-02 20:42:11 +00:00
bunnei
62f67df6d7 Merge pull request #5277 from Morph1984/fix-comments
general: Fix various spelling errors
2021-01-02 12:33:48 -08:00
bunnei
55fb8e7bdd Merge pull request #5273 from timleg002/patch-1
typo fix
2021-01-02 12:31:19 -08:00
MerryMage
57c9da1b39 dynarmic: Add Unsafe_InaccurateNaN optimization 2021-01-02 20:13:21 +00:00
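On the yuzu side, enabling such an optimization amounts to setting a bit
in the JIT's config; a sketch, assuming dynarmic's OptimizationFlag enum
and an unsafe-settings gate (the setting name here is illustrative):

    Dynarmic::A64::UserConfig config{};
    if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
        // Skip exact NaN propagation on the fast path; few games depend on it.
        config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
    }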
Morph
a745d87971 general: Fix various spelling errors 2021-01-02 10:23:41 -05:00
Fernando Sahmkow
53d92318b8 X86/NativeClock: Reimplement RDTSC access to be lock-free. 2021-01-02 04:00:27 +01:00
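A lock-free monotonic TSC read can be built from a relaxed atomic and a
CAS loop instead of a mutex; a simplified sketch of the idea (the real
implementation packs more state than this):

    #include <algorithm>
    #include <atomic>
    #include <cstdint>
    #include <x86intrin.h> // __rdtsc; MSVC gets it from <intrin.h>

    std::atomic<std::uint64_t> last_tsc{0};

    std::uint64_t MonotonicTSC() {
        std::uint64_t current = __rdtsc();
        std::uint64_t last = last_tsc.load(std::memory_order_relaxed);
        // Publish the newest timestamp without ever blocking; losing the race
        // only means another thread stored an equal-or-newer value.
        while (last < current &&
               !last_tsc.compare_exchange_weak(last, current, std::memory_order_relaxed)) {
        }
        return std::max(last, current);
    }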
Fernando Sahmkow
d4f871cb6a X86/NativeClock: Improve performance of clock calculations on hot path. 2021-01-02 00:43:47 +01:00
bunnei
1ff341f3dc Merge pull request #5209 from Morph1984/refactor-controller-connect
configure_input: Modify controller connection delay
2021-01-01 13:10:34 -08:00
Timotej Leginus
0d47c1d527 typo fix
2021-01-01 21:29:53 +01:00
LC
9e109849ff Merge pull request #5271 from MerryMage/rm-mem-Special
memory: Remove MemoryHook
2021-01-01 11:02:14 -05:00
Morph
904ac1daec configure_input: Modify controller connection delay
Increases the controller connection delay to 60 ms and refactors it to attempt to disconnect all controllers in HID before reconnecting them.
2021-01-01 06:39:24 -05:00
MerryMage
6d30745d77 memory: Remove MemoryHook 2021-01-01 11:34:38 +00:00
bunnei
eb318ffffc Merge pull request #5249 from ReinUsesLisp/lock-free-pages
core/memory: Read and write page table atomically
2021-01-01 02:54:01 -08:00
bunnei
0bddb794b0 Merge pull request #5239 from FearlessTobi/enable-translation
.ci/templates: Enable QT translation for MSVC CI
2020-12-31 23:31:23 -08:00
gal20
5dfb8743cb yuzu/main: fix mouse not showing on move and port citra-emu/citra#5476 2020-12-31 21:16:09 +02:00
Lioncash
8c27a74132 main: Make the loader error dialog fully translatable
Makes the dialog fully localizable and also adds disambiguation comments
to help translators understand what the formatting specifiers indicate.
2020-12-31 12:44:31 -05:00
Lioncash
803ac4ca59 main: Tidy up enum comparison
enum classes are comparable with one another, so these casts aren't
necessary.
2020-12-31 10:21:15 -05:00
ReinUsesLisp
cdbee27692 vulkan_instance: Allow different Vulkan versions and enforce 1.1
For listing the available physical devices we can use Vulkan 1.0.
Now that MoltenVK supports 1.1, we can require it for running games.

Add missing documentation.
2020-12-31 02:07:34 -03:00
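Enforcing 1.1 boils down to querying the loader version, which on Vulkan
1.0 loaders requires probing for vkEnumerateInstanceVersion first; a
sketch of that check:

    #include <stdexcept>
    #include <vulkan/vulkan.h>

    void RequireVulkan11() {
        uint32_t version = VK_API_VERSION_1_0;
        // The symbol does not exist on Vulkan 1.0 loaders, so probe for it.
        const auto enumerate = reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
            vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
        if (enumerate != nullptr) {
            enumerate(&version);
        }
        if (version < VK_API_VERSION_1_1) {
            throw std::runtime_error("Vulkan 1.1 is required to run games");
        }
    }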
ReinUsesLisp
7344a7c447 vk_device: Use an array to report lacking device limits
This makes it easier to add and tune the required device limits.
2020-12-31 02:07:34 -03:00
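Tabulating the limits makes the suitability check data-driven; a sketch
of the shape this takes, assuming 'properties' holds the result of
vkGetPhysicalDeviceProperties and LOG_ERROR is yuzu's logging macro (the
exact limits and thresholds below are illustrative):

    struct RequiredLimit {
        const char* name;
        std::uint64_t minimum;
        std::uint64_t value;
    };
    const VkPhysicalDeviceLimits& limits = properties.limits;
    const std::array required{
        RequiredLimit{"maxUniformBufferRange", 65536, limits.maxUniformBufferRange},
        RequiredLimit{"maxViewports", 16, limits.maxViewports},
    };
    for (const auto& limit : required) {
        if (limit.value < limit.minimum) {
            LOG_ERROR(Render_Vulkan, "{} is too low: {} < {}", limit.name,
                      limit.value, limit.minimum);
        }
    }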
ReinUsesLisp
f687392e6f vk_device: Stop initialization when device is not suitable
VKDevice::IsSuitable was not being called. To address this issue, check
suitability before initialization and throw an exception if it fails.

By doing this, we can deduplicate some code on queue searches.
Previously we would first search whether a present and graphics queue
existed, then search again during initialization to find the index.
2020-12-31 02:07:33 -03:00
ReinUsesLisp
53ea06dc17 renderer_vulkan: Remove two step initialization on VKDevice
The Vulkan device abstraction either initializes successfully on the
constructor or throws a Vulkan exception.
2020-12-31 02:07:33 -03:00
ReinUsesLisp
085adfea00 renderer_vulkan: Throw when enumerating devices fails
Report device enumeration errors with exceptions to be consistent with
other initialization-related function calls. Reduces the amount of code
to maintain.
2020-12-31 02:07:33 -03:00
ReinUsesLisp
11f0f7598d renderer_vulkan: Initialize surface in separate file
Move surface initialization code to a separate file. This code is
unlikely to be used outside of Vulkan, but keeping platform-specific
code (Win32, Xlib, Wayland) in its own translation unit keeps things
cleaner.
2020-12-31 02:07:33 -03:00
ReinUsesLisp
dce8720780 renderer_vulkan: Catch and report exceptions
Move more Vulkan code to report errors with exceptions, logging them
before signalling failure through the old error boolean for backwards
compatibility. In the future we can replace the rasterizer's two-step
initialization to always use exceptions.
2020-12-31 02:07:33 -03:00
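The backwards-compatible boundary is a try/catch that logs and converts
the exception into the old boolean; a sketch, assuming a vk::Exception
type in yuzu's Vulkan wrapper:

    bool RendererVulkan::Init() try {
        CreateInstance();
        CreateDevice();
        return true;
    } catch (const vk::Exception& exception) {
        // Report through the log, then fall back to the legacy error boolean.
        LOG_ERROR(Render_Vulkan, "Vulkan initialization failed: {}", exception.what());
        return false;
    }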
ReinUsesLisp
47843b4f09 renderer_vulkan: Create debug callback on separate file and throw
Initialize debug callbacks (messenger) from a separate file. This allows
sharing code with different backends.

Change our Vulkan error handling to use exceptions instead of error
codes, simplifying the initialization process.
2020-12-31 02:07:33 -03:00
ReinUsesLisp
25f88d99ce renderer_vulkan: Move instance initialization to a separate file
Simplify Vulkan's backend initialization code by moving it to a separate
file, allowing us to initialize a Vulkan instance from different
backends.
2020-12-31 02:07:33 -03:00
ReinUsesLisp
d1435009ed vulkan_common: Rename renderer_vulkan/wrapper.h to vulkan_common/vulkan_wrapper.h
Allows sharing Vulkan wrapper code between different rendering backends.
2020-12-31 02:07:14 -03:00
ReinUsesLisp
d937421422 vulkan_common: Move dynamic library load to a separate file
Allows us to initialize a Vulkan dynamic library from different backends
without duplicating code.
2020-12-31 02:02:48 -03:00
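Loading the Vulkan loader dynamically is the usual dlopen/LoadLibrary
dance; a POSIX-only sketch (Windows would use LoadLibrary and
vulkan-1.dll instead):

    #include <dlfcn.h>

    void* OpenVulkanLoader() {
        // Try the versioned SONAME first, then the development symlink.
        void* handle = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL);
        if (handle == nullptr) {
            handle = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL);
        }
        return handle; // nullptr means Vulkan is unavailable
    }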
lat9nq
43cad754d5 ci: Build an AppImage
This builds yuzu in an AppImage alongside the other archives during
release. This is required to allow distributing yuzu with upgraded
dependencies, such as Qt, in the future.
2020-12-30 16:05:15 -05:00
ReinUsesLisp
b3587102d1 core/memory: Read and write page table atomically
Squash attributes into the pointer's integer representation, making
each entry an uintptr_t that packs 2 attribute bits at the bottom and
the pointer above them. These bits are currently unused thanks to
alignment requirements.

Configure Dynarmic to mask out these bits on pointer reads.

While we are at it, remove some unused attributes carried over from
Citra.

Read/Write and other hot functions use a two-step unpacking process
that is less readable, to stop MSVC from emitting an extra AND
instruction on the hot path:

 mov         rdi,rcx
 shr         rdx,0Ch
 mov         r8,qword ptr [rax+8]
 mov         rax,qword ptr [r8+rdx*8]
 mov         rdx,rax
-and         al,3
 and         rdx,0FFFFFFFFFFFFFFFCh
 je          Core::Memory::Memory::Impl::Read<unsigned char>
 mov         rax,qword ptr [vaddr]
 movzx       eax,byte ptr [rdx+rax]
2020-12-29 21:54:49 -03:00
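The packing itself is a handful of bit operations over an atomic
uintptr_t; a simplified sketch of one entry (yuzu stores a whole table
of these):

    #include <atomic>
    #include <cstdint>

    class PageEntry {
    public:
        void Store(std::uint8_t* pointer, std::uintptr_t attribute) {
            // Alignment guarantees the low 2 bits of the pointer are zero.
            raw.store(reinterpret_cast<std::uintptr_t>(pointer) | (attribute & 3),
                      std::memory_order_relaxed);
        }
        std::uint8_t* Pointer() const {
            return reinterpret_cast<std::uint8_t*>(raw.load(std::memory_order_relaxed) &
                                                   ~std::uintptr_t{3});
        }
        std::uintptr_t Attribute() const {
            return raw.load(std::memory_order_relaxed) & 3;
        }

    private:
        std::atomic<std::uintptr_t> raw{0};
    };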
FearlessTobi
368b3ee227 .ci/templates: Enable QT translation for MSVC CI
Previously this flag was missing, causing translation files not to be shipped with CI builds of yuzu.
2020-12-28 15:54:02 +01:00
ReinUsesLisp
661483f313 renderer_vulkan/fixed_pipeline_state: Move enabled bindings to static state
Without using VK_EXT_robustness2, we can't consider the 'enabled' (not
null) vertex buffers as dynamic state, as this leads to invalid Vulkan
state. Move this to static state that is always hashed and compared in
the pipeline key.

The bits for enabled vertex buffers are moved into the attribute state
bitfield. This is not 'correct' as it's not an attribute state, but that
struct has bits to spare, and it's used in an array of 32 elements (the
exact same number of vertex buffer bindings).
2020-12-25 23:34:38 -03:00
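Borrowing a spare bit in an existing per-element bitfield looks roughly
like this; a sketch using yuzu's BitField helper, with an illustrative
field layout rather than the actual one:

    union VertexAttribute {
        u32 raw;
        BitField<0, 1, u32> enabled; // repurposed spare bit: vertex buffer bound
        BitField<1, 5, u32> buffer;
        BitField<6, 14, u32> offset;
    };
    static_assert(sizeof(VertexAttribute) == sizeof(u32));
    std::array<VertexAttribute, 32> attributes{}; // one per vertex buffer binding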
Morph
ff3aa5d380 yuzu/main: Add basic command line arguments
The following command line arguments are supported:

yuzu.exe "path_to_game" - Launches a game at "path_to_game"
yuzu.exe -f - Launches the next game in fullscreen
yuzu.exe -g "path_to_game" - Launches a game at "path_to_game"
yuzu.exe -f -g "path_to_game" - Launches a game at "path_to_game" in fullscreen
2020-12-25 15:41:00 -05:00
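One conventional way to back flags like these is Qt's QCommandLineParser;
a hedged sketch of the -f and -g handling above (yuzu's actual parsing
may differ):

    QCommandLineParser parser;
    const QCommandLineOption fullscreen{"f", "Start the game in fullscreen mode."};
    const QCommandLineOption game{"g", "Load the game at <path>.", "path"};
    parser.addOption(fullscreen);
    parser.addOption(game);
    parser.addPositionalArgument("path", "Path of the game to launch.");
    parser.process(app); // 'app' is the QApplication instance
    const bool start_fullscreen = parser.isSet(fullscreen);
    const QString game_path =
        parser.isSet(game) ? parser.value(game) : parser.positionalArguments().value(0);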
Levi Behunin
bc69cc1511 More forgetting... duh 2020-09-24 22:12:13 -06:00
Levi Behunin
24c1bb3842 Forgot to apply suggestion here as well 2020-09-24 21:58:51 -06:00
Levi Behunin
a19dc3bf00 Address Comments 2020-09-24 21:52:23 -06:00
Levi Behunin
d53b79ff5c Start of Integer flags implementation 2020-09-24 16:40:06 -06:00
396 changed files with 15020 additions and 12124 deletions

View File

@@ -15,5 +15,5 @@ mv "${REV_NAME}-source.tar.xz" $RELEASE_NAME
7z a "$REV_NAME.7z" $RELEASE_NAME
# move the compiled archive into the artifacts directory to be uploaded by travis releases
mv "$ARCHIVE_NAME" artifacts/
mv "$REV_NAME.7z" artifacts/
mv "$ARCHIVE_NAME" "${ARTIFACTS_DIR}/"
mv "$REV_NAME.7z" "${ARTIFACTS_DIR}/"

View File

@@ -2,5 +2,6 @@
GITDATE="`git show -s --date=short --format='%ad' | sed 's/-//g'`"
GITREV="`git show -s --format='%h'`"
ARTIFACTS_DIR="artifacts"
mkdir -p artifacts
mkdir -p "${ARTIFACTS_DIR}/"

View File

@@ -1,14 +1,49 @@
#!/bin/bash -ex
# Exit on error, rather than continuing with the rest of the script.
set -e
cd /yuzu
ccache -s
mkdir build || true && cd build
cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON
cmake .. -DDISPLAY_VERSION=$1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DCMAKE_INSTALL_PREFIX="/usr"
ninja
make -j$(nproc)
ccache -s
ctest -VV -C Release
make install DESTDIR=AppDir
rm -vf AppDir/usr/bin/yuzu-cmd AppDir/usr/bin/yuzu-tester
# Download tools needed to build an AppImage
wget -nc https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage
wget -nc https://github.com/linuxdeploy/linuxdeploy-plugin-qt/releases/download/continuous/linuxdeploy-plugin-qt-x86_64.AppImage
wget -nc https://github.com/darealshinji/AppImageKit-checkrt/releases/download/continuous/AppRun-patched-x86_64
wget -nc https://github.com/darealshinji/AppImageKit-checkrt/releases/download/continuous/exec-x86_64.so
# Set executable bit
chmod 755 \
AppRun-patched-x86_64 \
exec-x86_64.so \
linuxdeploy-x86_64.AppImage \
linuxdeploy-plugin-qt-x86_64.AppImage
# Workaround for https://github.com/AppImage/AppImageKit/issues/828
export APPIMAGE_EXTRACT_AND_RUN=1
mkdir -p AppDir/usr/optional
mkdir -p AppDir/usr/optional/libstdc++
mkdir -p AppDir/usr/optional/libgcc_s
# Deploy yuzu's needed dependencies
./linuxdeploy-x86_64.AppImage --appdir AppDir --plugin qt
# Workaround for building yuzu with GCC 10 but also trying to distribute it to Ubuntu 18.04 et al.
# See https://github.com/darealshinji/AppImageKit-checkrt
cp exec-x86_64.so AppDir/usr/optional/exec.so
cp AppRun-patched-x86_64 AppDir/AppRun
cp --dereference /usr/lib/x86_64-linux-gnu/libstdc++.so.6 AppDir/usr/optional/libstdc++/libstdc++.so.6
cp --dereference /lib/x86_64-linux-gnu/libgcc_s.so.1 AppDir/usr/optional/libgcc_s/libgcc_s.so.1

View File

@@ -2,6 +2,7 @@
. .ci/scripts/common/pre-upload.sh
APPIMAGE_NAME="yuzu-${GITDATE}-${GITREV}.AppImage"
REV_NAME="yuzu-linux-${GITDATE}-${GITREV}"
ARCHIVE_NAME="${REV_NAME}.tar.xz"
COMPRESSION_FLAGS="-cJvf"
@@ -17,4 +18,24 @@ mkdir "$DIR_NAME"
cp build/bin/yuzu-cmd "$DIR_NAME"
cp build/bin/yuzu "$DIR_NAME"
# Build an AppImage
cd build
wget -nc https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
chmod 755 appimagetool-x86_64.AppImage
if [ "${RELEASE_NAME}" = "mainline" ]; then
# Generate update information if releasing to mainline
./appimagetool-x86_64.AppImage -u "gh-releases-zsync|yuzu-emu|yuzu-${RELEASE_NAME}|latest|yuzu-*.AppImage.zsync" AppDir "${APPIMAGE_NAME}"
else
./appimagetool-x86_64.AppImage AppDir "${APPIMAGE_NAME}"
fi
cd ..
# Copy the AppImage and update info to the artifacts directory and avoid compressing it
cp "build/${APPIMAGE_NAME}" "${ARTIFACTS_DIR}/"
if [ -f "build/${APPIMAGE_NAME}.zsync" ]; then
cp "build/${APPIMAGE_NAME}.zsync" "${ARTIFACTS_DIR}/"
fi
. .ci/scripts/common/post-upload.sh

View File

@@ -8,7 +8,7 @@ steps:
displayName: 'Install vulkan-sdk'
- script: python -m pip install --upgrade pip conan
displayName: 'Install conan'
- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 --config Release -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DDISPLAY_VERSION=${{ parameters['version'] }} .. && cd ..
- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 --config Release -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} .. && cd ..
displayName: 'Configure CMake'
- task: MSBuild@1
displayName: 'Build'

View File

@@ -26,6 +26,10 @@ option(ENABLE_CUBEB "Enables the cubeb audio backend" ON)
option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)
if (NOT ENABLE_WEB_SERVICE)
set(YUZU_ENABLE_BOXCAT OFF)
endif()
# Default to a Release build
get_property(IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if (NOT IS_MULTI_CONFIG AND NOT CMAKE_BUILD_TYPE)
@@ -165,7 +169,7 @@ macro(yuzu_find_packages)
"lz4 1.8 lz4/1.9.2"
"nlohmann_json 3.8 nlohmann_json/3.8.0"
"ZLIB 1.2 zlib/1.2.11"
"zstd 1.4 zstd/1.4.5"
"zstd 1.4 zstd/1.4.8"
)
foreach(PACKAGE ${REQUIRED_LIBS})
@@ -239,7 +243,7 @@ if(ENABLE_QT)
if (YUZU_USE_QT_WEB_ENGINE)
find_package(Qt5 COMPONENTS WebEngineCore WebEngineWidgets)
endif()
if (ENABLE_QT_TRANSLATION)
find_package(Qt5 REQUIRED COMPONENTS LinguistTools ${QT_PREFIX_HINT})
endif()
@@ -257,7 +261,7 @@ if(ENABLE_SDL2)
find_package(SDL2)
if (NOT SDL2_FOUND)
# otherwise add this to the list of libraries to install
list(APPEND CONAN_REQUIRED_LIBS "sdl2/2.0.12@bincrafters/stable")
list(APPEND CONAN_REQUIRED_LIBS "sdl2/2.0.14@bincrafters/stable")
endif()
endif()
@@ -322,7 +326,7 @@ if (CONAN_REQUIRED_LIBS)
list(APPEND Boost_LIBRARIES Boost::context)
endif()
endif()
# Due to issues with variable scopes in functions, we need to also find_package(qt5) outside of the function
if(ENABLE_QT)
list(APPEND CMAKE_MODULE_PATH "${CONAN_QT_ROOT_RELEASE}")

View File

@@ -30,7 +30,6 @@ If you want to contribute to the user interface translation, please check out th
* __Windows__: [Windows Build](https://github.com/yuzu-emu/yuzu/wiki/Building-For-Windows)
* __Linux__: [Linux Build](https://github.com/yuzu-emu/yuzu/wiki/Building-For-Linux)
* __macOS__: [macOS Build](https://github.com/yuzu-emu/yuzu/wiki/Building-for-macOS)
### Support

View File

@@ -45,10 +45,15 @@ if (MSVC)
# Warnings
/W3
/we4062 # enumerator 'identifier' in a switch of enum 'enumeration' is not handled
/we4101 # 'identifier': unreferenced local variable
/we4265 # 'class': class has virtual functions, but destructor is not virtual
/we4388 # signed/unsigned mismatch
/we4547 # 'operator' : operator before comma has no effect; expected operator with side-effect
/we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
/we4555 # Expression has no effect; expected expression with side-effect
/we4834 # Discarding return value of function with 'nodiscard' attribute
/we5038 # data member 'member1' will be initialized after data member 'member2'
)
# /GS- - No stack buffer overflow checks
@@ -59,11 +64,16 @@ if (MSVC)
else()
add_compile_options(
-Wall
-Werror=array-bounds
-Werror=implicit-fallthrough
-Werror=missing-declarations
-Werror=missing-field-initializers
-Werror=reorder
-Werror=switch
-Werror=uninitialized
-Werror=unused-function
-Werror=unused-result
-Werror=unused-variable
-Wextra
-Wmissing-declarations
-Wno-attributes
@@ -122,7 +132,6 @@ add_subdirectory(tests)
if (ENABLE_SDL2)
add_subdirectory(yuzu_cmd)
add_subdirectory(yuzu_tester)
endif()
if (ENABLE_QT)

View File

@@ -383,11 +383,14 @@ void CommandGenerator::GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, E
const auto channel_count = params.channel_count;
for (s32 i = 0; i < channel_count; i++) {
// TODO(ogniK): Actually implement reverb
/*
if (params.input[i] != params.output[i]) {
const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
ApplyMix<1>(output, input, 32768, worker_params.sample_count);
}
}*/
auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
std::memset(output, 0, worker_params.sample_count * sizeof(s32));
}
}

View File

@@ -40,17 +40,17 @@ public:
SinkSampleFormat sample_format;
std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
bool in_use;
INSERT_UNION_PADDING_BYTES(5);
INSERT_PADDING_BYTES_NOINIT(5);
};
static_assert(sizeof(CircularBufferIn) == 0x28,
"SinkInfo::CircularBufferIn is in invalid size");
struct DeviceIn {
std::array<u8, 255> device_name;
INSERT_UNION_PADDING_BYTES(1);
INSERT_PADDING_BYTES_NOINIT(1);
s32_le input_count;
std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
INSERT_UNION_PADDING_BYTES(1);
INSERT_PADDING_BYTES_NOINIT(1);
bool down_matrix_enabled;
DownmixCoefficients down_matrix_coef;
};

View File

@@ -51,6 +51,14 @@ void Stream::Stop() {
UNIMPLEMENTED();
}
bool Stream::Flush() {
const bool had_buffers = !queued_buffers.empty();
while (!queued_buffers.empty()) {
queued_buffers.pop();
}
return had_buffers;
}
void Stream::SetVolume(float volume) {
game_volume = volume;
}

View File

@@ -56,6 +56,9 @@ public:
/// Queues a buffer into the audio stream, returns true on success
bool QueueBuffer(BufferPtr&& buffer);
/// Flush audio buffers
bool Flush();
/// Returns true if the audio stream contains a buffer with the specified tag
[[nodiscard]] bool ContainsBuffer(Buffer::Tag tag) const;

View File

@@ -86,28 +86,28 @@ struct BehaviorFlags {
static_assert(sizeof(BehaviorFlags) == 0x4, "BehaviorFlags is an invalid size");
struct ADPCMContext {
u16 header{};
s16 yn1{};
s16 yn2{};
u16 header;
s16 yn1;
s16 yn2;
};
static_assert(sizeof(ADPCMContext) == 0x6, "ADPCMContext is an invalid size");
struct VoiceState {
s64 played_sample_count{};
s32 offset{};
s32 wave_buffer_index{};
std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid{};
s32 wave_buffer_consumed{};
std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history{};
s32 fraction{};
VAddr context_address{};
Codec::ADPCM_Coeff coeff{};
ADPCMContext context{};
std::array<s64, 2> biquad_filter_state{};
std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples{};
u32 external_context_size{};
bool is_external_context_used{};
bool voice_dropped{};
s64 played_sample_count;
s32 offset;
s32 wave_buffer_index;
std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid;
s32 wave_buffer_consumed;
std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history;
s32 fraction;
VAddr context_address;
Codec::ADPCM_Coeff coeff;
ADPCMContext context;
std::array<s64, 2> biquad_filter_state;
std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples;
u32 external_context_size;
bool is_external_context_used;
bool voice_dropped;
};
class VoiceChannelResource {

View File

@@ -98,7 +98,6 @@ add_library(common STATIC
algorithm.h
alignment.h
assert.h
atomic_ops.cpp
atomic_ops.h
detached_tasks.cpp
detached_tasks.h
@@ -108,7 +107,6 @@ add_library(common STATIC
bit_util.h
cityhash.cpp
cityhash.h
color.h
common_funcs.h
common_paths.h
common_types.h
@@ -123,6 +121,7 @@ add_library(common STATIC
hash.h
hex_util.cpp
hex_util.h
intrusive_red_black_tree.h
logging/backend.cpp
logging/backend.h
logging/filter.cpp
@@ -135,16 +134,17 @@ add_library(common STATIC
math_util.h
memory_detect.cpp
memory_detect.h
memory_hook.cpp
memory_hook.h
microprofile.cpp
microprofile.h
microprofileui.h
misc.cpp
nvidia_flags.cpp
nvidia_flags.h
page_table.cpp
page_table.h
param_package.cpp
param_package.h
parent_of_member.h
quaternion.h
ring_buffer.h
scm_rev.cpp
@@ -167,8 +167,7 @@ add_library(common STATIC
threadsafe_queue.h
time_zone.cpp
time_zone.h
timer.cpp
timer.h
tree.h
uint128.cpp
uint128.h
uuid.cpp

View File

@@ -9,50 +9,45 @@
namespace Common {
template <typename T>
[[nodiscard]] constexpr T AlignUp(T value, std::size_t size) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignUp(T value, size_t size) {
auto mod{static_cast<T>(value % size)};
value -= mod;
return static_cast<T>(mod == T{0} ? value : value + size);
}
template <typename T>
[[nodiscard]] constexpr T AlignDown(T value, std::size_t size) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignUpLog2(T value, size_t align_log2) {
return static_cast<T>((value + ((1ULL << align_log2) - 1)) >> align_log2 << align_log2);
}
template <typename T>
requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignDown(T value, size_t size) {
return static_cast<T>(value - value % size);
}
template <typename T>
[[nodiscard]] constexpr T AlignBits(T value, std::size_t align) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
return static_cast<T>((value + ((1ULL << align) - 1)) >> align << align);
}
template <typename T>
[[nodiscard]] constexpr bool Is4KBAligned(T value) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
requires std::is_unsigned_v<T>[[nodiscard]] constexpr bool Is4KBAligned(T value) {
return (value & 0xFFF) == 0;
}
template <typename T>
[[nodiscard]] constexpr bool IsWordAligned(T value) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
requires std::is_unsigned_v<T>[[nodiscard]] constexpr bool IsWordAligned(T value) {
return (value & 0b11) == 0;
}
template <typename T>
[[nodiscard]] constexpr bool IsAligned(T value, std::size_t alignment) {
using U = typename std::make_unsigned<T>::type;
requires std::is_integral_v<T>[[nodiscard]] constexpr bool IsAligned(T value, size_t alignment) {
using U = typename std::make_unsigned_t<T>;
const U mask = static_cast<U>(alignment - 1);
return (value & mask) == 0;
}
template <typename T, std::size_t Align = 16>
template <typename T, size_t Align = 16>
class AlignmentAllocator {
public:
using value_type = T;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using size_type = size_t;
using difference_type = ptrdiff_t;
using propagate_on_container_copy_assignment = std::true_type;
using propagate_on_container_move_assignment = std::true_type;

View File

@@ -29,22 +29,19 @@ assert_noinline_call(const Fn& fn) {
}
#define ASSERT(_a_) \
do \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
while (0)
if (!(_a_)) { \
LOG_CRITICAL(Debug, "Assertion Failed!"); \
}
#define ASSERT_MSG(_a_, ...) \
do \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
} \
while (0)
if (!(_a_)) { \
LOG_CRITICAL(Debug, "Assertion Failed! " __VA_ARGS__); \
}
#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
#define UNREACHABLE() \
{ LOG_CRITICAL(Debug, "Unreachable code!"); }
#define UNREACHABLE_MSG(...) \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
{ LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); }
#ifdef _DEBUG
#define DEBUG_ASSERT(_a_) ASSERT(_a_)

View File

@@ -1,75 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstring>
#include "common/atomic_ops.h"
#if _MSC_VER
#include <intrin.h>
#endif
namespace Common {
#if _MSC_VER
bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
const u8 result =
_InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected);
return result == expected;
}
bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
const u16 result =
_InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected);
return result == expected;
}
bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
const u32 result =
_InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected);
return result == expected;
}
bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer),
value, expected);
return result == expected;
}
bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1],
value[0],
reinterpret_cast<__int64*>(expected.data())) != 0;
}
#else
bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
return __sync_bool_compare_and_swap(pointer, expected, value);
}
bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
return __sync_bool_compare_and_swap(pointer, expected, value);
}
bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
return __sync_bool_compare_and_swap(pointer, expected, value);
}
bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
return __sync_bool_compare_and_swap(pointer, expected, value);
}
bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
unsigned __int128 value_a;
unsigned __int128 expected_a;
std::memcpy(&value_a, value.data(), sizeof(u128));
std::memcpy(&expected_a, expected.data(), sizeof(u128));
return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
}
#endif
} // namespace Common

View File

@@ -4,14 +4,75 @@
#pragma once
#include <cstring>
#include <memory>
#include "common/common_types.h"
#if _MSC_VER
#include <intrin.h>
#endif
namespace Common {
[[nodiscard]] bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected);
[[nodiscard]] bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected);
[[nodiscard]] bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected);
[[nodiscard]] bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected);
[[nodiscard]] bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected);
#if _MSC_VER
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
const u8 result =
_InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected);
return result == expected;
}
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
const u16 result =
_InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected);
return result == expected;
}
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
const u32 result =
_InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected);
return result == expected;
}
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer),
value, expected);
return result == expected;
}
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1],
value[0],
reinterpret_cast<__int64*>(expected.data())) != 0;
}
#else
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
return __sync_bool_compare_and_swap(pointer, expected, value);
}
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
return __sync_bool_compare_and_swap(pointer, expected, value);
}
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
return __sync_bool_compare_and_swap(pointer, expected, value);
}
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
return __sync_bool_compare_and_swap(pointer, expected, value);
}
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
unsigned __int128 value_a;
unsigned __int128 expected_a;
std::memcpy(&value_a, value.data(), sizeof(u128));
std::memcpy(&expected_a, expected.data(), sizeof(u128));
return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
}
#endif
} // namespace Common

View File

@@ -4,13 +4,10 @@
#pragma once
#include <bit>
#include <climits>
#include <cstddef>
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include "common/common_types.h"
namespace Common {
@@ -21,124 +18,30 @@ template <typename T>
return sizeof(T) * CHAR_BIT;
}
#ifdef _MSC_VER
[[nodiscard]] inline u32 CountLeadingZeroes32(u32 value) {
unsigned long leading_zero = 0;
if (_BitScanReverse(&leading_zero, value) != 0) {
return 31 - leading_zero;
}
return 32;
[[nodiscard]] constexpr u32 MostSignificantBit32(const u32 value) {
return 31U - static_cast<u32>(std::countl_zero(value));
}
[[nodiscard]] inline u32 CountLeadingZeroes64(u64 value) {
unsigned long leading_zero = 0;
if (_BitScanReverse64(&leading_zero, value) != 0) {
return 63 - leading_zero;
}
return 64;
}
#else
[[nodiscard]] inline u32 CountLeadingZeroes32(u32 value) {
if (value == 0) {
return 32;
}
return static_cast<u32>(__builtin_clz(value));
[[nodiscard]] constexpr u32 MostSignificantBit64(const u64 value) {
return 63U - static_cast<u32>(std::countl_zero(value));
}
[[nodiscard]] inline u32 CountLeadingZeroes64(u64 value) {
if (value == 0) {
return 64;
}
return static_cast<u32>(__builtin_clzll(value));
}
#endif
#ifdef _MSC_VER
[[nodiscard]] inline u32 CountTrailingZeroes32(u32 value) {
unsigned long trailing_zero = 0;
if (_BitScanForward(&trailing_zero, value) != 0) {
return trailing_zero;
}
return 32;
}
[[nodiscard]] inline u32 CountTrailingZeroes64(u64 value) {
unsigned long trailing_zero = 0;
if (_BitScanForward64(&trailing_zero, value) != 0) {
return trailing_zero;
}
return 64;
}
#else
[[nodiscard]] inline u32 CountTrailingZeroes32(u32 value) {
if (value == 0) {
return 32;
}
return static_cast<u32>(__builtin_ctz(value));
}
[[nodiscard]] inline u32 CountTrailingZeroes64(u64 value) {
if (value == 0) {
return 64;
}
return static_cast<u32>(__builtin_ctzll(value));
}
#endif
#ifdef _MSC_VER
[[nodiscard]] inline u32 MostSignificantBit32(const u32 value) {
unsigned long result;
_BitScanReverse(&result, value);
return static_cast<u32>(result);
}
[[nodiscard]] inline u32 MostSignificantBit64(const u64 value) {
unsigned long result;
_BitScanReverse64(&result, value);
return static_cast<u32>(result);
}
#else
[[nodiscard]] inline u32 MostSignificantBit32(const u32 value) {
return 31U - static_cast<u32>(__builtin_clz(value));
}
[[nodiscard]] inline u32 MostSignificantBit64(const u64 value) {
return 63U - static_cast<u32>(__builtin_clzll(value));
}
#endif
[[nodiscard]] inline u32 Log2Floor32(const u32 value) {
[[nodiscard]] constexpr u32 Log2Floor32(const u32 value) {
return MostSignificantBit32(value);
}
[[nodiscard]] inline u32 Log2Ceil32(const u32 value) {
const u32 log2_f = Log2Floor32(value);
return log2_f + ((value ^ (1U << log2_f)) != 0U);
}
[[nodiscard]] inline u32 Log2Floor64(const u64 value) {
[[nodiscard]] constexpr u32 Log2Floor64(const u64 value) {
return MostSignificantBit64(value);
}
[[nodiscard]] inline u32 Log2Ceil64(const u64 value) {
const u64 log2_f = static_cast<u64>(Log2Floor64(value));
return static_cast<u32>(log2_f + ((value ^ (1ULL << log2_f)) != 0ULL));
[[nodiscard]] constexpr u32 Log2Ceil32(const u32 value) {
const u32 log2_f = Log2Floor32(value);
return log2_f + static_cast<u32>((value ^ (1U << log2_f)) != 0U);
}
[[nodiscard]] constexpr u32 Log2Ceil64(const u64 value) {
const u64 log2_f = Log2Floor64(value);
return static_cast<u32>(log2_f + static_cast<u64>((value ^ (1ULL << log2_f)) != 0ULL));
}
} // namespace Common

View File

@@ -1,271 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <cstring>
#include "common/common_types.h"
#include "common/swap.h"
#include "common/vector_math.h"
namespace Common::Color {
/// Convert a 1-bit color component to 8 bit
[[nodiscard]] constexpr u8 Convert1To8(u8 value) {
return value * 255;
}
/// Convert a 4-bit color component to 8 bit
[[nodiscard]] constexpr u8 Convert4To8(u8 value) {
return (value << 4) | value;
}
/// Convert a 5-bit color component to 8 bit
[[nodiscard]] constexpr u8 Convert5To8(u8 value) {
return (value << 3) | (value >> 2);
}
/// Convert a 6-bit color component to 8 bit
[[nodiscard]] constexpr u8 Convert6To8(u8 value) {
return (value << 2) | (value >> 4);
}
/// Convert a 8-bit color component to 1 bit
[[nodiscard]] constexpr u8 Convert8To1(u8 value) {
return value >> 7;
}
/// Convert a 8-bit color component to 4 bit
[[nodiscard]] constexpr u8 Convert8To4(u8 value) {
return value >> 4;
}
/// Convert a 8-bit color component to 5 bit
[[nodiscard]] constexpr u8 Convert8To5(u8 value) {
return value >> 3;
}
/// Convert a 8-bit color component to 6 bit
[[nodiscard]] constexpr u8 Convert8To6(u8 value) {
return value >> 2;
}
/**
* Decode a color stored in RGBA8 format
* @param bytes Pointer to encoded source color
* @return Result color decoded as Common::Vec4<u8>
*/
[[nodiscard]] inline Common::Vec4<u8> DecodeRGBA8(const u8* bytes) {
return {bytes[3], bytes[2], bytes[1], bytes[0]};
}
/**
* Decode a color stored in RGB8 format
* @param bytes Pointer to encoded source color
* @return Result color decoded as Common::Vec4<u8>
*/
[[nodiscard]] inline Common::Vec4<u8> DecodeRGB8(const u8* bytes) {
return {bytes[2], bytes[1], bytes[0], 255};
}
/**
* Decode a color stored in RG8 (aka HILO8) format
* @param bytes Pointer to encoded source color
* @return Result color decoded as Common::Vec4<u8>
*/
[[nodiscard]] inline Common::Vec4<u8> DecodeRG8(const u8* bytes) {
return {bytes[1], bytes[0], 0, 255};
}
/**
* Decode a color stored in RGB565 format
* @param bytes Pointer to encoded source color
* @return Result color decoded as Common::Vec4<u8>
*/
[[nodiscard]] inline Common::Vec4<u8> DecodeRGB565(const u8* bytes) {
u16_le pixel;
std::memcpy(&pixel, bytes, sizeof(pixel));
return {Convert5To8((pixel >> 11) & 0x1F), Convert6To8((pixel >> 5) & 0x3F),
Convert5To8(pixel & 0x1F), 255};
}
/**
* Decode a color stored in RGB5A1 format
* @param bytes Pointer to encoded source color
* @return Result color decoded as Common::Vec4<u8>
*/
[[nodiscard]] inline Common::Vec4<u8> DecodeRGB5A1(const u8* bytes) {
u16_le pixel;
std::memcpy(&pixel, bytes, sizeof(pixel));
return {Convert5To8((pixel >> 11) & 0x1F), Convert5To8((pixel >> 6) & 0x1F),
Convert5To8((pixel >> 1) & 0x1F), Convert1To8(pixel & 0x1)};
}
/**
* Decode a color stored in RGBA4 format
* @param bytes Pointer to encoded source color
* @return Result color decoded as Common::Vec4<u8>
*/
[[nodiscard]] inline Common::Vec4<u8> DecodeRGBA4(const u8* bytes) {
u16_le pixel;
std::memcpy(&pixel, bytes, sizeof(pixel));
return {Convert4To8((pixel >> 12) & 0xF), Convert4To8((pixel >> 8) & 0xF),
Convert4To8((pixel >> 4) & 0xF), Convert4To8(pixel & 0xF)};
}
/**
* Decode a depth value stored in D16 format
* @param bytes Pointer to encoded source value
* @return Depth value as an u32
*/
[[nodiscard]] inline u32 DecodeD16(const u8* bytes) {
u16_le data;
std::memcpy(&data, bytes, sizeof(data));
return data;
}
/**
* Decode a depth value stored in D24 format
* @param bytes Pointer to encoded source value
* @return Depth value as an u32
*/
[[nodiscard]] inline u32 DecodeD24(const u8* bytes) {
return (bytes[2] << 16) | (bytes[1] << 8) | bytes[0];
}
/**
* Decode a depth value and a stencil value stored in D24S8 format
* @param bytes Pointer to encoded source values
* @return Resulting values stored as a Common::Vec2
*/
[[nodiscard]] inline Common::Vec2<u32> DecodeD24S8(const u8* bytes) {
return {static_cast<u32>((bytes[2] << 16) | (bytes[1] << 8) | bytes[0]), bytes[3]};
}
/**
* Encode a color as RGBA8 format
* @param color Source color to encode
* @param bytes Destination pointer to store encoded color
*/
inline void EncodeRGBA8(const Common::Vec4<u8>& color, u8* bytes) {
bytes[3] = color.r();
bytes[2] = color.g();
bytes[1] = color.b();
bytes[0] = color.a();
}
/**
* Encode a color as RGB8 format
* @param color Source color to encode
* @param bytes Destination pointer to store encoded color
*/
inline void EncodeRGB8(const Common::Vec4<u8>& color, u8* bytes) {
bytes[2] = color.r();
bytes[1] = color.g();
bytes[0] = color.b();
}
/**
* Encode a color as RG8 (aka HILO8) format
* @param color Source color to encode
* @param bytes Destination pointer to store encoded color
*/
inline void EncodeRG8(const Common::Vec4<u8>& color, u8* bytes) {
bytes[1] = color.r();
bytes[0] = color.g();
}
/**
* Encode a color as RGB565 format
* @param color Source color to encode
* @param bytes Destination pointer to store encoded color
*/
inline void EncodeRGB565(const Common::Vec4<u8>& color, u8* bytes) {
const u16_le data =
(Convert8To5(color.r()) << 11) | (Convert8To6(color.g()) << 5) | Convert8To5(color.b());
std::memcpy(bytes, &data, sizeof(data));
}
/**
* Encode a color as RGB5A1 format
* @param color Source color to encode
* @param bytes Destination pointer to store encoded color
*/
inline void EncodeRGB5A1(const Common::Vec4<u8>& color, u8* bytes) {
const u16_le data = (Convert8To5(color.r()) << 11) | (Convert8To5(color.g()) << 6) |
(Convert8To5(color.b()) << 1) | Convert8To1(color.a());
std::memcpy(bytes, &data, sizeof(data));
}
/**
* Encode a color as RGBA4 format
* @param color Source color to encode
* @param bytes Destination pointer to store encoded color
*/
inline void EncodeRGBA4(const Common::Vec4<u8>& color, u8* bytes) {
const u16 data = (Convert8To4(color.r()) << 12) | (Convert8To4(color.g()) << 8) |
(Convert8To4(color.b()) << 4) | Convert8To4(color.a());
std::memcpy(bytes, &data, sizeof(data));
}
/**
* Encode a 16 bit depth value as D16 format
* @param value 16 bit source depth value to encode
* @param bytes Pointer where to store the encoded value
*/
inline void EncodeD16(u32 value, u8* bytes) {
const u16_le data = static_cast<u16>(value);
std::memcpy(bytes, &data, sizeof(data));
}
/**
* Encode a 24 bit depth value as D24 format
* @param value 24 bit source depth value to encode
* @param bytes Pointer where to store the encoded value
*/
inline void EncodeD24(u32 value, u8* bytes) {
bytes[0] = value & 0xFF;
bytes[1] = (value >> 8) & 0xFF;
bytes[2] = (value >> 16) & 0xFF;
}
/**
* Encode a 24 bit depth and 8 bit stencil values as D24S8 format
* @param depth 24 bit source depth value to encode
* @param stencil 8 bit source stencil value to encode
* @param bytes Pointer where to store the encoded value
*/
inline void EncodeD24S8(u32 depth, u8 stencil, u8* bytes) {
bytes[0] = depth & 0xFF;
bytes[1] = (depth >> 8) & 0xFF;
bytes[2] = (depth >> 16) & 0xFF;
bytes[3] = stencil;
}
/**
* Encode a 24 bit depth value as D24X8 format (32 bits per pixel with 8 bits unused)
* @param depth 24 bit source depth value to encode
* @param bytes Pointer where to store the encoded value
* @note unused bits will not be modified
*/
inline void EncodeD24X8(u32 depth, u8* bytes) {
bytes[0] = depth & 0xFF;
bytes[1] = (depth >> 8) & 0xFF;
bytes[2] = (depth >> 16) & 0xFF;
}
/**
* Encode an 8 bit stencil value as X24S8 format (32 bits per pixel with 24 bits unused)
* @param stencil 8 bit source stencil value to encode
* @param bytes Pointer where to store the encoded value
* @note unused bits will not be modified
*/
inline void EncodeX24S8(u8 stencil, u8* bytes) {
bytes[3] = stencil;
}
} // namespace Common::Color

View File

@@ -24,10 +24,10 @@
#define INSERT_PADDING_WORDS(num_words) \
std::array<u32, num_words> CONCAT2(pad, __LINE__) {}
/// These are similar to the INSERT_PADDING_* macros, but are needed for padding unions. This is
/// because unions can only be initialized by one member.
#define INSERT_UNION_PADDING_BYTES(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__)
#define INSERT_UNION_PADDING_WORDS(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__)
/// These are similar to the INSERT_PADDING_* macros but do not zero-initialize the contents.
/// This keeps the structure trivial to construct.
#define INSERT_PADDING_BYTES_NOINIT(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__)
#define INSERT_PADDING_WORDS_NOINIT(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__)
#ifndef _MSC_VER
@@ -93,6 +93,31 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
return static_cast<T>(key) == 0; \
}
/// Evaluates a boolean expression, and returns a result unless that expression is true.
#define R_UNLESS(expr, res) \
{ \
if (!(expr)) { \
if (res.IsError()) { \
LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
} \
return res; \
} \
}
#define R_SUCCEEDED(res) (res.IsSuccess())
/// Evaluates an expression that returns a result, and returns the result if it would fail.
#define R_TRY(res_expr) \
{ \
const auto _tmp_r_try_rc = (res_expr); \
if (_tmp_r_try_rc.IsError()) { \
return _tmp_r_try_rc; \
} \
}
/// Evaluates a boolean expression, and succeeds if that expression is true.
#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), RESULT_SUCCESS)
namespace Common {
[[nodiscard]] constexpr u32 MakeMagic(char a, char b, char c, char d) {

View File

@@ -11,16 +11,16 @@ namespace Common {
/// Ceiled integer division.
template <typename N, typename D>
requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr auto DivCeil(
N number, D divisor) {
return (static_cast<D>(number) + divisor - 1) / divisor;
requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr N DivCeil(N number,
D divisor) {
return static_cast<N>((static_cast<D>(number) + divisor - 1) / divisor);
}
/// Ceiled integer division with logarithmic divisor in base 2
template <typename N, typename D>
requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr auto DivCeilLog2(
requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr N DivCeilLog2(
N value, D alignment_log2) {
return (static_cast<D>(value) + (D(1) << alignment_log2) - 1) >> alignment_log2;
return static_cast<N>((static_cast<D>(value) + (D(1) << alignment_log2) - 1) >> alignment_log2);
}
} // namespace Common

View File

@@ -0,0 +1,602 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/parent_of_member.h"
#include "common/tree.h"
namespace Common {
namespace impl {
class IntrusiveRedBlackTreeImpl;
}
struct IntrusiveRedBlackTreeNode {
public:
using EntryType = RBEntry<IntrusiveRedBlackTreeNode>;
constexpr IntrusiveRedBlackTreeNode() = default;
void SetEntry(const EntryType& new_entry) {
entry = new_entry;
}
[[nodiscard]] EntryType& GetEntry() {
return entry;
}
[[nodiscard]] const EntryType& GetEntry() const {
return entry;
}
private:
EntryType entry{};
friend class impl::IntrusiveRedBlackTreeImpl;
template <class, class, class>
friend class IntrusiveRedBlackTree;
};
template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree;
namespace impl {
class IntrusiveRedBlackTreeImpl {
private:
template <class, class, class>
friend class ::Common::IntrusiveRedBlackTree;
using RootType = RBHead<IntrusiveRedBlackTreeNode>;
RootType root;
public:
template <bool Const>
class Iterator;
using value_type = IntrusiveRedBlackTreeNode;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
template <bool Const>
class Iterator {
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename IntrusiveRedBlackTreeImpl::value_type;
using difference_type = typename IntrusiveRedBlackTreeImpl::difference_type;
using pointer = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_pointer,
IntrusiveRedBlackTreeImpl::pointer>;
using reference = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_reference,
IntrusiveRedBlackTreeImpl::reference>;
private:
pointer node;
public:
explicit Iterator(pointer n) : node(n) {}
bool operator==(const Iterator& rhs) const {
return this->node == rhs.node;
}
bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
pointer operator->() const {
return this->node;
}
reference operator*() const {
return *this->node;
}
Iterator& operator++() {
this->node = GetNext(this->node);
return *this;
}
Iterator& operator--() {
this->node = GetPrev(this->node);
return *this;
}
Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
operator Iterator<true>() const {
return Iterator<true>(this->node);
}
};
private:
// Define accessors using RB_* functions.
bool EmptyImpl() const {
return root.IsEmpty();
}
IntrusiveRedBlackTreeNode* GetMinImpl() const {
return RB_MIN(const_cast<RootType*>(&root));
}
IntrusiveRedBlackTreeNode* GetMaxImpl() const {
return RB_MAX(const_cast<RootType*>(&root));
}
IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
return RB_REMOVE(&root, node);
}
public:
static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
return RB_NEXT(node);
}
static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
return RB_PREV(node);
}
static const IntrusiveRedBlackTreeNode* GetNext(const IntrusiveRedBlackTreeNode* node) {
return static_cast<const IntrusiveRedBlackTreeNode*>(
GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
}
static const IntrusiveRedBlackTreeNode* GetPrev(const IntrusiveRedBlackTreeNode* node) {
return static_cast<const IntrusiveRedBlackTreeNode*>(
GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
}
public:
constexpr IntrusiveRedBlackTreeImpl() {}
// Iterator accessors.
iterator begin() {
return iterator(this->GetMinImpl());
}
const_iterator begin() const {
return const_iterator(this->GetMinImpl());
}
iterator end() {
return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
}
const_iterator end() const {
return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
}
const_iterator cbegin() const {
return this->begin();
}
const_iterator cend() const {
return this->end();
}
iterator iterator_to(reference ref) {
return iterator(&ref);
}
const_iterator iterator_to(const_reference ref) const {
return const_iterator(&ref);
}
// Content management.
bool empty() const {
return this->EmptyImpl();
}
reference back() {
return *this->GetMaxImpl();
}
const_reference back() const {
return *this->GetMaxImpl();
}
reference front() {
return *this->GetMinImpl();
}
const_reference front() const {
return *this->GetMinImpl();
}
iterator erase(iterator it) {
auto cur = std::addressof(*it);
auto next = GetNext(cur);
this->RemoveImpl(cur);
return iterator(next);
}
};
} // namespace impl
template <typename T>
concept HasLightCompareType = requires {
{ std::is_same<typename T::LightCompareType, void>::value }
->std::convertible_to<bool>;
};
namespace impl {
template <typename T, typename Default>
consteval auto* GetLightCompareType() {
if constexpr (HasLightCompareType<T>) {
return static_cast<typename T::LightCompareType*>(nullptr);
} else {
return static_cast<Default*>(nullptr);
}
}
} // namespace impl
template <typename T, typename Default>
using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;
template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree {
public:
using ImplType = impl::IntrusiveRedBlackTreeImpl;
private:
ImplType impl{};
public:
template <bool Const>
class Iterator;
using value_type = T;
using size_type = size_t;
using difference_type = ptrdiff_t;
using pointer = T*;
using const_pointer = const T*;
using reference = T&;
using const_reference = const T&;
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
using light_value_type = LightCompareType<Comparator, value_type>;
using const_light_pointer = const light_value_type*;
using const_light_reference = const light_value_type&;
template <bool Const>
class Iterator {
public:
friend class IntrusiveRedBlackTree<T, Traits, Comparator>;
using ImplIterator =
std::conditional_t<Const, ImplType::const_iterator, ImplType::iterator>;
using iterator_category = std::bidirectional_iterator_tag;
using value_type = typename IntrusiveRedBlackTree::value_type;
using difference_type = typename IntrusiveRedBlackTree::difference_type;
using pointer = std::conditional_t<Const, IntrusiveRedBlackTree::const_pointer,
IntrusiveRedBlackTree::pointer>;
using reference = std::conditional_t<Const, IntrusiveRedBlackTree::const_reference,
IntrusiveRedBlackTree::reference>;
private:
ImplIterator iterator;
private:
explicit Iterator(ImplIterator it) : iterator(it) {}
explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
ImplType::iterator>::type::pointer ptr)
: iterator(ptr) {}
ImplIterator GetImplIterator() const {
return this->iterator;
}
public:
bool operator==(const Iterator& rhs) const {
return this->iterator == rhs.iterator;
}
bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
pointer operator->() const {
return Traits::GetParent(std::addressof(*this->iterator));
}
reference operator*() const {
return *Traits::GetParent(std::addressof(*this->iterator));
}
Iterator& operator++() {
++this->iterator;
return *this;
}
Iterator& operator--() {
--this->iterator;
return *this;
}
Iterator operator++(int) {
const Iterator it{*this};
++this->iterator;
return it;
}
Iterator operator--(int) {
const Iterator it{*this};
--this->iterator;
return it;
}
operator Iterator<true>() const {
return Iterator<true>(this->iterator);
}
};
private:
static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
const IntrusiveRedBlackTreeNode* rhs) {
return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
}
static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
}
// Define accessors using RB_* functions.
IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
return RB_INSERT(&impl.root, node, CompareImpl);
}
IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
return RB_FIND(const_cast<ImplType::RootType*>(&impl.root),
const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
}
IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
return RB_NFIND(const_cast<ImplType::RootType*>(&impl.root),
const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
}
IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
return RB_FIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
static_cast<const void*>(lelm), LightCompareImpl);
}
IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
return RB_NFIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
static_cast<const void*>(lelm), LightCompareImpl);
}
public:
constexpr IntrusiveRedBlackTree() = default;
// Iterator accessors.
iterator begin() {
return iterator(this->impl.begin());
}
const_iterator begin() const {
return const_iterator(this->impl.begin());
}
iterator end() {
return iterator(this->impl.end());
}
const_iterator end() const {
return const_iterator(this->impl.end());
}
const_iterator cbegin() const {
return this->begin();
}
const_iterator cend() const {
return this->end();
}
iterator iterator_to(reference ref) {
return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
}
const_iterator iterator_to(const_reference ref) const {
return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
}
// Content management.
bool empty() const {
return this->impl.empty();
}
reference back() {
return *Traits::GetParent(std::addressof(this->impl.back()));
}
const_reference back() const {
return *Traits::GetParent(std::addressof(this->impl.back()));
}
reference front() {
return *Traits::GetParent(std::addressof(this->impl.front()));
}
const_reference front() const {
return *Traits::GetParent(std::addressof(this->impl.front()));
}
iterator erase(iterator it) {
return iterator(this->impl.erase(it.GetImplIterator()));
}
iterator insert(reference ref) {
ImplType::pointer node = Traits::GetNode(std::addressof(ref));
this->InsertImpl(node);
return iterator(node);
}
iterator find(const_reference ref) const {
return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
}
iterator nfind(const_reference ref) const {
return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
}
iterator find_light(const_light_reference ref) const {
return iterator(this->FindLightImpl(std::addressof(ref)));
}
iterator nfind_light(const_light_reference ref) const {
return iterator(this->NFindLightImpl(std::addressof(ref)));
}
};
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraits;
template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
class IntrusiveRedBlackTreeMemberTraits<Member, Derived> {
public:
template <class Comparator>
using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraits, Comparator>;
using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
private:
template <class, class, class>
friend class IntrusiveRedBlackTree;
friend class impl::IntrusiveRedBlackTreeImpl;
static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
return std::addressof(parent->*Member);
}
static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
return std::addressof(parent->*Member);
}
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
return GetParentPointer<Member, Derived>(node);
}
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
return GetParentPointer<Member, Derived>(node);
}
private:
static constexpr TypedStorage<Derived> DerivedStorage = {};
static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage));
};
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;
template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert<Member, Derived> {
public:
template <class Comparator>
using TreeType =
IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
static constexpr bool IsValid() {
TypedStorage<Derived> DerivedStorage = {};
return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
}
private:
template <class, class, class>
friend class IntrusiveRedBlackTree;
friend class impl::IntrusiveRedBlackTreeImpl;
static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
return std::addressof(parent->*Member);
}
static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
return std::addressof(parent->*Member);
}
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
return GetParentPointer<Member, Derived>(node);
}
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
return GetParentPointer<Member, Derived>(node);
}
};
template <class Derived>
class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
public:
constexpr Derived* GetPrev() {
return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
}
constexpr const Derived* GetPrev() const {
return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
}
constexpr Derived* GetNext() {
return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
}
constexpr const Derived* GetNext() const {
return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
}
};
template <class Derived>
class IntrusiveRedBlackTreeBaseTraits {
public:
template <class Comparator>
using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeBaseTraits, Comparator>;
using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
private:
template <class, class, class>
friend class IntrusiveRedBlackTree;
friend class impl::IntrusiveRedBlackTreeImpl;
static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
return static_cast<IntrusiveRedBlackTreeNode*>(parent);
}
static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
}
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
return static_cast<Derived*>(node);
}
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
return static_cast<const Derived*>(node);
}
};
} // namespace Common
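A minimal usage sketch (not part of the diff) may help here: a type embeds an IntrusiveRedBlackTreeNode, the member traits derive the tree type, and insertion links the embedded node without allocating. This assumes the comparator convention used elsewhere in the header, a static Compare returning negative/zero/positive, and that erase accepts the iterator returned by find.
struct MyObject {
    int key{};
    Common::IntrusiveRedBlackTreeNode node{};
};
struct MyObjectComparator {
    static int Compare(const MyObject& lhs, const MyObject& rhs) {
        if (lhs.key < rhs.key) {
            return -1;
        }
        return lhs.key > rhs.key ? 1 : 0;
    }
};
using MyObjectTraits = Common::IntrusiveRedBlackTreeMemberTraits<&MyObject::node>;
using MyObjectTree = MyObjectTraits::TreeType<MyObjectComparator>;
void Example() {
    MyObjectTree tree;
    MyObject a;
    MyObject b;
    a.key = 1;
    b.key = 2;
    tree.insert(a); // links a.node into the tree; no heap allocation
    tree.insert(b);
    tree.erase(tree.find(a)); // erase via the iterator returned by find
}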


@@ -145,10 +145,18 @@ void ColorConsoleBackend::Write(const Entry& entry) {
PrintColoredMessage(entry);
}
// _SH_DENYWR allows read only access to the file for other programs.
// It is #defined to 0 on other platforms
FileBackend::FileBackend(const std::string& filename)
: file(filename, "w", _SH_DENYWR), bytes_written(0) {}
FileBackend::FileBackend(const std::string& filename) : bytes_written(0) {
if (Common::FS::Exists(filename + ".old.txt")) {
Common::FS::Delete(filename + ".old.txt");
}
if (Common::FS::Exists(filename)) {
Common::FS::Rename(filename, filename + ".old.txt");
}
// _SH_DENYWR allows read only access to the file for other programs.
// It is #defined to 0 on other platforms
file = Common::FS::IOFile(filename, "w", _SH_DENYWR);
}
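The rotation scheme above keeps exactly one previous log: any stale backup is deleted, the current log becomes the backup, and a fresh file is opened. A hedged standalone sketch of the same behavior using std::filesystem directly (names illustrative, not the project's FS wrappers):
#include <filesystem>
#include <string>
#include <system_error>
void RotateLog(const std::string& filename) {
    namespace fs = std::filesystem;
    std::error_code ec;
    const fs::path current{filename};
    const fs::path backup{filename + ".old.txt"};
    fs::remove(backup, ec);              // drop the previous backup, if any
    if (fs::exists(current, ec)) {
        fs::rename(current, backup, ec); // keep exactly one generation of history
    }
}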
void FileBackend::Write(const Entry& entry) {
void FileBackend::Write(const Entry& entry) {
// prevent logs from going over the maximum size (in case it's spamming and the user doesn't


@@ -1,11 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/memory_hook.h"
namespace Common {
MemoryHook::~MemoryHook() = default;
} // namespace Common


@@ -1,47 +0,0 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <optional>
#include "common/common_types.h"
namespace Common {
/**
* Memory hooks have two purposes:
* 1. To allow reads and writes to a region of memory to be intercepted. This is used to implement
* texture forwarding and memory breakpoints for debugging.
* 2. To allow for the implementation of MMIO devices.
*
* A hook may be mapped to multiple regions of memory.
*
If std::nullopt or false is returned from a function, the read/write request is passed through
* to the underlying memory region.
*/
class MemoryHook {
public:
virtual ~MemoryHook();
virtual std::optional<bool> IsValidAddress(VAddr addr) = 0;
virtual std::optional<u8> Read8(VAddr addr) = 0;
virtual std::optional<u16> Read16(VAddr addr) = 0;
virtual std::optional<u32> Read32(VAddr addr) = 0;
virtual std::optional<u64> Read64(VAddr addr) = 0;
virtual bool ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size) = 0;
virtual bool Write8(VAddr addr, u8 data) = 0;
virtual bool Write16(VAddr addr, u16 data) = 0;
virtual bool Write32(VAddr addr, u32 data) = 0;
virtual bool Write64(VAddr addr, u64 data) = 0;
virtual bool WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size) = 0;
};
using MemoryHookPointer = std::shared_ptr<MemoryHook>;
} // namespace Common
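For context, an implementation of the interface being deleted here looked roughly like the sketch below: return std::nullopt or false to fall through to regular memory, or a value/true to intercept. DummyDevice and its single register are assumptions for illustration.
class DummyDevice : public Common::MemoryHook {
public:
    std::optional<bool> IsValidAddress(VAddr) override { return true; }
    std::optional<u8> Read8(VAddr) override { return std::nullopt; }  // pass through
    std::optional<u16> Read16(VAddr) override { return std::nullopt; }
    std::optional<u32> Read32(VAddr) override { return reg; }         // intercepted MMIO read
    std::optional<u64> Read64(VAddr) override { return std::nullopt; }
    bool ReadBlock(VAddr, void*, std::size_t) override { return false; }
    bool Write8(VAddr, u8) override { return false; }
    bool Write16(VAddr, u16) override { return false; }
    bool Write32(VAddr, u32 data) override { reg = data; return true; } // intercepted MMIO write
    bool Write64(VAddr, u64) override { return false; }
    bool WriteBlock(VAddr, const void*, std::size_t) override { return false; }
private:
    u32 reg = 0;
};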


@@ -0,0 +1,27 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <filesystem>
#include <stdlib.h>
#include <fmt/format.h>
#include "common/file_util.h"
#include "common/nvidia_flags.h"
namespace Common {
void ConfigureNvidiaEnvironmentFlags() {
#ifdef _WIN32
const std::string shader_path = Common::FS::SanitizePath(
fmt::format("{}/nvidia/", Common::FS::GetUserPath(Common::FS::UserPath::ShaderDir)));
const std::string windows_path =
Common::FS::SanitizePath(shader_path, Common::FS::DirectorySeparator::BackwardSlash);
void(Common::FS::CreateFullPath(shader_path + '/'));
void(_putenv(fmt::format("__GL_SHADER_DISK_CACHE_PATH={}", windows_path).c_str()));
void(_putenv("__GL_SHADER_DISK_CACHE_SKIP_CLEANUP=1"));
#endif
}
} // namespace Common
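The only ordering requirement is that the environment variables are set before the driver reads them; a plausible call site (illustrative, not from this PR) is early in the frontend's entry point, before any OpenGL context is created:
int main(int argc, char** argv) {
    Common::ConfigureNvidiaEnvironmentFlags();
    // ... create the window, the GL context, and run the emulator ...
}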

src/common/nvidia_flags.h

@@ -0,0 +1,10 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
namespace Common {
/// Configure platform-specific flags for Nvidia's driver
void ConfigureNvidiaEnvironmentFlags();
} // namespace Common


@@ -10,16 +10,10 @@ PageTable::PageTable() = default;
PageTable::~PageTable() noexcept = default;
void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
bool has_attribute) {
const std::size_t num_page_table_entries{1ULL
<< (address_space_width_in_bits - page_size_in_bits)};
void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
pointers.resize(num_page_table_entries);
backing_addr.resize(num_page_table_entries);
if (has_attribute) {
attributes.resize(num_page_table_entries);
}
}
} // namespace Common


@@ -4,10 +4,10 @@
#pragma once
#include <atomic>
#include <tuple>
#include "common/common_types.h"
#include "common/memory_hook.h"
#include "common/virtual_buffer.h"
namespace Common {
@@ -20,27 +20,6 @@ enum class PageType : u8 {
/// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
/// invalidation
RasterizerCachedMemory,
/// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
Special,
/// Page is allocated for use.
Allocated,
};
struct SpecialRegion {
enum class Type {
DebugHook,
IODevice,
} type;
MemoryHookPointer handler;
[[nodiscard]] bool operator<(const SpecialRegion& other) const {
return std::tie(type, handler) < std::tie(other.type, other.handler);
}
[[nodiscard]] bool operator==(const SpecialRegion& other) const {
return std::tie(type, handler) == std::tie(other.type, other.handler);
}
};
/**
@@ -48,6 +27,59 @@ struct SpecialRegion {
* mimics the way a real CPU page table works.
*/
struct PageTable {
/// Number of bits reserved for attribute tagging.
/// This can be at most the guaranteed alignment of the pointers in the page table.
static constexpr int ATTRIBUTE_BITS = 2;
/**
* Pair of host pointer and page type attribute.
* This uses the lower bits of a given pointer to store the attribute tag.
* Writing and reading the pointer attribute pair is guaranteed to be atomic for the same method
* call. In other words, they are guaranteed to be synchronized at all times.
*/
class PageInfo {
public:
/// Returns the page pointer
[[nodiscard]] u8* Pointer() const noexcept {
return ExtractPointer(raw.load(std::memory_order_relaxed));
}
/// Returns the page type attribute
[[nodiscard]] PageType Type() const noexcept {
return ExtractType(raw.load(std::memory_order_relaxed));
}
/// Returns the page pointer and attribute pair, extracted from the same atomic read
[[nodiscard]] std::pair<u8*, PageType> PointerType() const noexcept {
const uintptr_t non_atomic_raw = raw.load(std::memory_order_relaxed);
return {ExtractPointer(non_atomic_raw), ExtractType(non_atomic_raw)};
}
/// Returns the raw representation of the page information.
/// Use ExtractPointer and ExtractType to unpack the value.
[[nodiscard]] uintptr_t Raw() const noexcept {
return raw.load(std::memory_order_relaxed);
}
/// Write a page pointer and type pair atomically
void Store(u8* pointer, PageType type) noexcept {
raw.store(reinterpret_cast<uintptr_t>(pointer) | static_cast<uintptr_t>(type));
}
/// Unpack a pointer from a page info raw representation
[[nodiscard]] static u8* ExtractPointer(uintptr_t raw) noexcept {
return reinterpret_cast<u8*>(raw & (~uintptr_t{0} << ATTRIBUTE_BITS));
}
/// Unpack a page type from a page info raw representation
[[nodiscard]] static PageType ExtractType(uintptr_t raw) noexcept {
return static_cast<PageType>(raw & ((uintptr_t{1} << ATTRIBUTE_BITS) - 1));
}
private:
std::atomic<uintptr_t> raw;
};
PageTable();
~PageTable() noexcept;
@@ -58,25 +90,21 @@ struct PageTable {
PageTable& operator=(PageTable&&) noexcept = default;
/**
* Resizes the page table to be able to accomodate enough pages within
* Resizes the page table to be able to accommodate enough pages within
* a given address space.
*
* @param address_space_width_in_bits The address size width in bits.
* @param page_size_in_bits The page size in bits.
* @param has_attribute Whether or not this page has any backing attributes.
*/
void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
bool has_attribute);
void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
/**
* Vector of memory pointers backing each page. An entry can only be non-null if the
* corresponding entry in the `attributes` vector is of type `Memory`.
* corresponding attribute element is of type `Memory`.
*/
VirtualBuffer<u8*> pointers;
VirtualBuffer<PageInfo> pointers;
VirtualBuffer<u64> backing_addr;
VirtualBuffer<PageType> attributes;
};
} // namespace Common
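The PageInfo trick above depends only on pointer alignment: any pointer aligned to at least 1 << ATTRIBUTE_BITS bytes has its low two bits clear, so they can carry the PageType tag. A self-contained illustration of the same packing (names are ours, not the header's):
#include <cstdint>
enum class Tag : std::uint8_t { Unmapped = 0, Memory = 1 };
constexpr int kAttributeBits = 2;
std::uintptr_t Pack(void* ptr, Tag tag) {
    // Assumes ptr is at least 4-byte aligned, leaving the low bits free for the tag
    return reinterpret_cast<std::uintptr_t>(ptr) | static_cast<std::uintptr_t>(tag);
}
void* UnpackPointer(std::uintptr_t raw) {
    return reinterpret_cast<void*>(raw & (~std::uintptr_t{0} << kAttributeBits));
}
Tag UnpackTag(std::uintptr_t raw) {
    return static_cast<Tag>(raw & ((std::uintptr_t{1} << kAttributeBits) - 1));
}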


@@ -0,0 +1,191 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <type_traits>
#include "common/assert.h"
#include "common/common_types.h"
namespace Common {
namespace detail {
template <typename T, size_t Size, size_t Align>
struct TypedStorageImpl {
std::aligned_storage_t<Size, Align> storage_;
};
} // namespace detail
template <typename T>
using TypedStorage = detail::TypedStorageImpl<T, sizeof(T), alignof(T)>;
template <typename T>
static constexpr T* GetPointer(TypedStorage<T>& ts) {
return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
}
template <typename T>
static constexpr const T* GetPointer(const TypedStorage<T>& ts) {
return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
}
namespace impl {
template <size_t MaxDepth>
struct OffsetOfUnionHolder {
template <typename ParentType, typename MemberType, size_t Offset>
union UnionImpl {
using PaddingMember = char;
static constexpr size_t GetOffset() {
return Offset;
}
#pragma pack(push, 1)
struct {
PaddingMember padding[Offset];
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
} data;
#pragma pack(pop)
UnionImpl<ParentType, MemberType, Offset + 1> next_union;
};
template <typename ParentType, typename MemberType>
union UnionImpl<ParentType, MemberType, 0> {
static constexpr size_t GetOffset() {
return 0;
}
struct {
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
} data;
UnionImpl<ParentType, MemberType, 1> next_union;
};
template <typename ParentType, typename MemberType>
union UnionImpl<ParentType, MemberType, MaxDepth> {};
};
template <typename ParentType, typename MemberType>
struct OffsetOfCalculator {
using UnionHolder =
typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
0>;
union Union {
char c{};
UnionHolder first_union;
TypedStorage<ParentType> parent;
constexpr Union() : c() {}
};
static constexpr Union U = {};
static constexpr const MemberType* GetNextAddress(const MemberType* start,
const MemberType* target) {
while (start < target) {
start++;
}
return start;
}
static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
const MemberType* target) {
return (target - start) * sizeof(MemberType);
}
template <typename CurUnion>
static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
CurUnion& cur_union) {
constexpr size_t Offset = CurUnion::GetOffset();
const auto target = std::addressof(GetPointer(U.parent)->*member);
const auto start = std::addressof(cur_union.data.members[0]);
const auto next = GetNextAddress(start, target);
if (next != target) {
if constexpr (Offset < sizeof(MemberType) - 1) {
return OffsetOfImpl(member, cur_union.next_union);
} else {
UNREACHABLE();
}
}
return (next - start) * sizeof(MemberType) + Offset;
}
static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
return OffsetOfImpl(member, U.first_union);
}
};
template <typename T>
struct GetMemberPointerTraits;
template <typename P, typename M>
struct GetMemberPointerTraits<M P::*> {
using Parent = P;
using Member = M;
};
template <auto MemberPtr>
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;
template <auto MemberPtr>
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
static inline std::ptrdiff_t OffsetOf = [] {
using DeducedParentType = GetParentType<MemberPtr>;
using MemberType = GetMemberType<MemberPtr>;
static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
std::is_same<RealParentType, DeducedParentType>::value);
return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
}();
} // namespace impl
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
return *static_cast<RealParentType*>(
static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
return *static_cast<const RealParentType*>(static_cast<const void*>(
static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
} // namespace Common
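In short, OffsetOf computes a member's offset without offsetof's standard-layout restriction, and GetParentPointer walks back from a member's address to its owning object. A small usage sketch with an assumed example type:
struct Owner {
    int a{};
    int b{};
};
void Example(Owner& owner) {
    int* member = &owner.b;
    Owner* recovered = Common::GetParentPointer<&Owner::b>(member);
    // recovered == &owner
}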


@@ -49,3 +49,9 @@ ScopeExitHelper<Func> ScopeExit(Func&& func) {
* \endcode
*/
#define SCOPE_EXIT(body) auto CONCAT2(scope_exit_helper_, __LINE__) = detail::ScopeExit([&]() body)
/**
* This macro is similar to SCOPE_EXIT, except the object is caller managed. This is intended to be
* used when the caller might want to cancel the ScopeExit.
*/
#define SCOPE_GUARD(body) detail::ScopeExit([&]() body)
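The difference from SCOPE_EXIT is that SCOPE_GUARD hands the helper object to the caller, who may cancel it before it fires. A hedged usage sketch, assuming the helper exposes a Cancel() member as the cancellable-macro commit suggests (DoWork/UndoChanges are hypothetical):
bool Commit() {
    auto rollback = SCOPE_GUARD({ UndoChanges(); });
    if (!DoWork()) {
        return false; // rollback fires here and undoes the changes
    }
    rollback.Cancel(); // success: keep the changes, skip the rollback
    return true;
}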


@@ -394,7 +394,7 @@ public:
template <typename S, typename T2, typename F2>
friend S operator%(const S& p, const swapped_t v);
// Arithmetics + assignements
// Arithmetics + assignments
template <typename S, typename T2, typename F2>
friend S operator+=(const S& p, const swapped_t v);
@@ -451,7 +451,7 @@ S operator%(const S& i, const swap_struct_t<T, F> v) {
return i % v.swap();
}
// Arithmetics + assignements
// Arithmetics + assignments
template <typename S, typename T, typename F>
S& operator+=(S& i, const swap_struct_t<T, F> v) {
i += v.swap();


@@ -1,159 +0,0 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <ctime>
#include <fmt/format.h>
#include "common/common_types.h"
#include "common/string_util.h"
#include "common/timer.h"
namespace Common {
std::chrono::milliseconds Timer::GetTimeMs() {
return std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now().time_since_epoch());
}
// --------------------------------------------
// Initiate, Start, Stop, and Update the time
// --------------------------------------------
// Set initial values for the class
Timer::Timer() : m_LastTime(0), m_StartTime(0), m_Running(false) {
Update();
}
// Write the starting time
void Timer::Start() {
m_StartTime = GetTimeMs();
m_Running = true;
}
// Stop the timer
void Timer::Stop() {
// Write the final time
m_LastTime = GetTimeMs();
m_Running = false;
}
// Update the last time variable
void Timer::Update() {
m_LastTime = GetTimeMs();
// TODO(ector) - QPF
}
// -------------------------------------
// Get time difference and elapsed time
// -------------------------------------
// Get the number of milliseconds since the last Update()
std::chrono::milliseconds Timer::GetTimeDifference() {
return GetTimeMs() - m_LastTime;
}
// Add the time difference since the last Update() to the starting time.
// This is used to compensate for a paused game.
void Timer::AddTimeDifference() {
m_StartTime += GetTimeDifference();
}
// Get the time elapsed since the Start()
std::chrono::milliseconds Timer::GetTimeElapsed() {
// If we have not started yet, return 1 (because then I don't
// have to change the FPS calculation in CoreRerecording.cpp).
if (m_StartTime.count() == 0)
return std::chrono::milliseconds(1);
// Return the final timer time if the timer is stopped
if (!m_Running)
return (m_LastTime - m_StartTime);
return (GetTimeMs() - m_StartTime);
}
// Get the formatted time elapsed since the Start()
std::string Timer::GetTimeElapsedFormatted() const {
// If we have not started yet, return zero
if (m_StartTime.count() == 0)
return "00:00:00:000";
// The number of milliseconds since the start.
// Use a different value if the timer is stopped.
std::chrono::milliseconds Milliseconds;
if (m_Running)
Milliseconds = GetTimeMs() - m_StartTime;
else
Milliseconds = m_LastTime - m_StartTime;
// Seconds
std::chrono::seconds Seconds = std::chrono::duration_cast<std::chrono::seconds>(Milliseconds);
// Minutes
std::chrono::minutes Minutes = std::chrono::duration_cast<std::chrono::minutes>(Milliseconds);
// Hours
std::chrono::hours Hours = std::chrono::duration_cast<std::chrono::hours>(Milliseconds);
std::string TmpStr = fmt::format("{:02}:{:02}:{:02}:{:03}", Hours.count(), Minutes.count() % 60,
Seconds.count() % 60, Milliseconds.count() % 1000);
return TmpStr;
}
// Get the number of seconds since January 1 1970
std::chrono::seconds Timer::GetTimeSinceJan1970() {
return std::chrono::duration_cast<std::chrono::seconds>(GetTimeMs());
}
std::chrono::seconds Timer::GetLocalTimeSinceJan1970() {
time_t sysTime, tzDiff, tzDST;
struct tm* gmTime;
time(&sysTime);
// Account for DST where needed
gmTime = localtime(&sysTime);
if (gmTime->tm_isdst == 1)
tzDST = 3600;
else
tzDST = 0;
// Lazy way to get local time in sec
gmTime = gmtime(&sysTime);
tzDiff = sysTime - mktime(gmTime);
return std::chrono::seconds(sysTime + tzDiff + tzDST);
}
// Return the current time formatted as Minutes:Seconds:Milliseconds
// in the form 00:00:000.
std::string Timer::GetTimeFormatted() {
time_t sysTime;
struct tm* gmTime;
char tmp[13];
time(&sysTime);
gmTime = localtime(&sysTime);
strftime(tmp, 6, "%M:%S", gmTime);
u64 milliseconds = static_cast<u64>(GetTimeMs().count()) % 1000;
return fmt::format("{}:{:03}", tmp, milliseconds);
}
// Returns a timestamp with decimals for precise time comparisons
// ----------------
double Timer::GetDoubleTime() {
// Get continuous timestamp
auto tmp_seconds = static_cast<u64>(GetTimeSinceJan1970().count());
const auto ms = static_cast<double>(static_cast<u64>(GetTimeMs().count()) % 1000);
// Remove a few years. We only really want enough seconds to make
// sure that we are detecting actual actions, perhaps 60 seconds is
// enough really, but I leave a year of seconds anyway, in case the
// user's clock is incorrect or something like that.
tmp_seconds = tmp_seconds - (38 * 365 * 24 * 60 * 60);
// Make a smaller integer that fits in the double
const auto seconds = static_cast<u32>(tmp_seconds);
return seconds + ms;
}
} // namespace Common


@@ -1,41 +0,0 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <chrono>
#include <string>
#include "common/common_types.h"
namespace Common {
class Timer {
public:
Timer();
void Start();
void Stop();
void Update();
// The time difference is always returned in milliseconds, regardless of alternative internal
// representation
[[nodiscard]] std::chrono::milliseconds GetTimeDifference();
void AddTimeDifference();
[[nodiscard]] static std::chrono::seconds GetTimeSinceJan1970();
[[nodiscard]] static std::chrono::seconds GetLocalTimeSinceJan1970();
[[nodiscard]] static double GetDoubleTime();
[[nodiscard]] static std::string GetTimeFormatted();
[[nodiscard]] std::string GetTimeElapsedFormatted() const;
[[nodiscard]] std::chrono::milliseconds GetTimeElapsed();
[[nodiscard]] static std::chrono::milliseconds GetTimeMs();
private:
std::chrono::milliseconds m_LastTime;
std::chrono::milliseconds m_StartTime;
bool m_Running;
};
} // namespace Common

src/common/tree.h

@@ -0,0 +1,674 @@
/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
/* $FreeBSD$ */
/*-
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
/*
* This file defines data structures for red-black trees.
*
* A red-black tree is a binary search tree with the node color as an
* extra attribute. It fulfills a set of conditions:
* - every search path from the root to a leaf consists of the
* same number of black nodes,
* - each red node (except for the root) has a black parent,
* - each leaf node is black.
*
* Every operation on a red-black tree is bounded as O(lg n).
* The maximum height of a red-black tree is 2lg (n+1).
*/
namespace Common {
template <typename T>
class RBHead {
public:
[[nodiscard]] T* Root() {
return rbh_root;
}
[[nodiscard]] const T* Root() const {
return rbh_root;
}
void SetRoot(T* root) {
rbh_root = root;
}
[[nodiscard]] bool IsEmpty() const {
return Root() == nullptr;
}
private:
T* rbh_root = nullptr;
};
enum class EntryColor {
Black,
Red,
};
template <typename T>
class RBEntry {
public:
[[nodiscard]] T* Left() {
return rbe_left;
}
[[nodiscard]] const T* Left() const {
return rbe_left;
}
void SetLeft(T* left) {
rbe_left = left;
}
[[nodiscard]] T* Right() {
return rbe_right;
}
[[nodiscard]] const T* Right() const {
return rbe_right;
}
void SetRight(T* right) {
rbe_right = right;
}
[[nodiscard]] T* Parent() {
return rbe_parent;
}
[[nodiscard]] const T* Parent() const {
return rbe_parent;
}
void SetParent(T* parent) {
rbe_parent = parent;
}
[[nodiscard]] bool IsBlack() const {
return rbe_color == EntryColor::Black;
}
[[nodiscard]] bool IsRed() const {
return rbe_color == EntryColor::Red;
}
[[nodiscard]] EntryColor Color() const {
return rbe_color;
}
void SetColor(EntryColor color) {
rbe_color = color;
}
private:
T* rbe_left = nullptr;
T* rbe_right = nullptr;
T* rbe_parent = nullptr;
EntryColor rbe_color{};
};
template <typename Node>
[[nodiscard]] RBEntry<Node>& RB_ENTRY(Node* node) {
return node->GetEntry();
}
template <typename Node>
[[nodiscard]] const RBEntry<Node>& RB_ENTRY(const Node* node) {
return node->GetEntry();
}
template <typename Node>
[[nodiscard]] Node* RB_PARENT(Node* node) {
return RB_ENTRY(node).Parent();
}
template <typename Node>
[[nodiscard]] const Node* RB_PARENT(const Node* node) {
return RB_ENTRY(node).Parent();
}
template <typename Node>
void RB_SET_PARENT(Node* node, Node* parent) {
return RB_ENTRY(node).SetParent(parent);
}
template <typename Node>
[[nodiscard]] Node* RB_LEFT(Node* node) {
return RB_ENTRY(node).Left();
}
template <typename Node>
[[nodiscard]] const Node* RB_LEFT(const Node* node) {
return RB_ENTRY(node).Left();
}
template <typename Node>
void RB_SET_LEFT(Node* node, Node* left) {
return RB_ENTRY(node).SetLeft(left);
}
template <typename Node>
[[nodiscard]] Node* RB_RIGHT(Node* node) {
return RB_ENTRY(node).Right();
}
template <typename Node>
[[nodiscard]] const Node* RB_RIGHT(const Node* node) {
return RB_ENTRY(node).Right();
}
template <typename Node>
void RB_SET_RIGHT(Node* node, Node* right) {
return RB_ENTRY(node).SetRight(right);
}
template <typename Node>
[[nodiscard]] bool RB_IS_BLACK(const Node* node) {
return RB_ENTRY(node).IsBlack();
}
template <typename Node>
[[nodiscard]] bool RB_IS_RED(const Node* node) {
return RB_ENTRY(node).IsRed();
}
template <typename Node>
[[nodiscard]] EntryColor RB_COLOR(const Node* node) {
return RB_ENTRY(node).Color();
}
template <typename Node>
void RB_SET_COLOR(Node* node, EntryColor color) {
return RB_ENTRY(node).SetColor(color);
}
template <typename Node>
void RB_SET(Node* node, Node* parent) {
auto& entry = RB_ENTRY(node);
entry.SetParent(parent);
entry.SetLeft(nullptr);
entry.SetRight(nullptr);
entry.SetColor(EntryColor::Red);
}
template <typename Node>
void RB_SET_BLACKRED(Node* black, Node* red) {
RB_SET_COLOR(black, EntryColor::Black);
RB_SET_COLOR(red, EntryColor::Red);
}
template <typename Node>
void RB_ROTATE_LEFT(RBHead<Node>* head, Node* elm, Node*& tmp) {
tmp = RB_RIGHT(elm);
RB_SET_RIGHT(elm, RB_LEFT(tmp));
if (RB_RIGHT(elm) != nullptr) {
RB_SET_PARENT(RB_LEFT(tmp), elm);
}
RB_SET_PARENT(tmp, RB_PARENT(elm));
if (RB_PARENT(tmp) != nullptr) {
if (elm == RB_LEFT(RB_PARENT(elm))) {
RB_SET_LEFT(RB_PARENT(elm), tmp);
} else {
RB_SET_RIGHT(RB_PARENT(elm), tmp);
}
} else {
head->SetRoot(tmp);
}
RB_SET_LEFT(tmp, elm);
RB_SET_PARENT(elm, tmp);
}
template <typename Node>
void RB_ROTATE_RIGHT(RBHead<Node>* head, Node* elm, Node*& tmp) {
tmp = RB_LEFT(elm);
RB_SET_LEFT(elm, RB_RIGHT(tmp));
if (RB_LEFT(elm) != nullptr) {
RB_SET_PARENT(RB_RIGHT(tmp), elm);
}
RB_SET_PARENT(tmp, RB_PARENT(elm));
if (RB_PARENT(tmp) != nullptr) {
if (elm == RB_LEFT(RB_PARENT(elm))) {
RB_SET_LEFT(RB_PARENT(elm), tmp);
} else {
RB_SET_RIGHT(RB_PARENT(elm), tmp);
}
} else {
head->SetRoot(tmp);
}
RB_SET_RIGHT(tmp, elm);
RB_SET_PARENT(elm, tmp);
}
template <typename Node>
void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
Node* parent = nullptr;
Node* tmp = nullptr;
while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
Node* gparent = RB_PARENT(parent);
if (parent == RB_LEFT(gparent)) {
tmp = RB_RIGHT(gparent);
if (tmp && RB_IS_RED(tmp)) {
RB_SET_COLOR(tmp, EntryColor::Black);
RB_SET_BLACKRED(parent, gparent);
elm = gparent;
continue;
}
if (RB_RIGHT(parent) == elm) {
RB_ROTATE_LEFT(head, parent, tmp);
tmp = parent;
parent = elm;
elm = tmp;
}
RB_SET_BLACKRED(parent, gparent);
RB_ROTATE_RIGHT(head, gparent, tmp);
} else {
tmp = RB_LEFT(gparent);
if (tmp && RB_IS_RED(tmp)) {
RB_SET_COLOR(tmp, EntryColor::Black);
RB_SET_BLACKRED(parent, gparent);
elm = gparent;
continue;
}
if (RB_LEFT(parent) == elm) {
RB_ROTATE_RIGHT(head, parent, tmp);
tmp = parent;
parent = elm;
elm = tmp;
}
RB_SET_BLACKRED(parent, gparent);
RB_ROTATE_LEFT(head, gparent, tmp);
}
}
RB_SET_COLOR(head->Root(), EntryColor::Black);
}
template <typename Node>
void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
Node* tmp;
while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root()) {
if (RB_LEFT(parent) == elm) {
tmp = RB_RIGHT(parent);
if (RB_IS_RED(tmp)) {
RB_SET_BLACKRED(tmp, parent);
RB_ROTATE_LEFT(head, parent, tmp);
tmp = RB_RIGHT(parent);
}
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
RB_SET_COLOR(tmp, EntryColor::Red);
elm = parent;
parent = RB_PARENT(elm);
} else {
if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
Node* oleft;
if ((oleft = RB_LEFT(tmp)) != nullptr) {
RB_SET_COLOR(oleft, EntryColor::Black);
}
RB_SET_COLOR(tmp, EntryColor::Red);
RB_ROTATE_RIGHT(head, tmp, oleft);
tmp = RB_RIGHT(parent);
}
RB_SET_COLOR(tmp, RB_COLOR(parent));
RB_SET_COLOR(parent, EntryColor::Black);
if (RB_RIGHT(tmp)) {
RB_SET_COLOR(RB_RIGHT(tmp), EntryColor::Black);
}
RB_ROTATE_LEFT(head, parent, tmp);
elm = head->Root();
break;
}
} else {
tmp = RB_LEFT(parent);
if (RB_IS_RED(tmp)) {
RB_SET_BLACKRED(tmp, parent);
RB_ROTATE_RIGHT(head, parent, tmp);
tmp = RB_LEFT(parent);
}
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
RB_SET_COLOR(tmp, EntryColor::Red);
elm = parent;
parent = RB_PARENT(elm);
} else {
if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
Node* oright;
if ((oright = RB_RIGHT(tmp)) != nullptr) {
RB_SET_COLOR(oright, EntryColor::Black);
}
RB_SET_COLOR(tmp, EntryColor::Red);
RB_ROTATE_LEFT(head, tmp, oright);
tmp = RB_LEFT(parent);
}
RB_SET_COLOR(tmp, RB_COLOR(parent));
RB_SET_COLOR(parent, EntryColor::Black);
if (RB_LEFT(tmp)) {
RB_SET_COLOR(RB_LEFT(tmp), EntryColor::Black);
}
RB_ROTATE_RIGHT(head, parent, tmp);
elm = head->Root();
break;
}
}
}
if (elm) {
RB_SET_COLOR(elm, EntryColor::Black);
}
}
template <typename Node>
Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
Node* child = nullptr;
Node* parent = nullptr;
Node* old = elm;
EntryColor color{};
const auto finalize = [&] {
if (color == EntryColor::Black) {
RB_REMOVE_COLOR(head, parent, child);
}
return old;
};
if (RB_LEFT(elm) == nullptr) {
child = RB_RIGHT(elm);
} else if (RB_RIGHT(elm) == nullptr) {
child = RB_LEFT(elm);
} else {
Node* left;
elm = RB_RIGHT(elm);
while ((left = RB_LEFT(elm)) != nullptr) {
elm = left;
}
child = RB_RIGHT(elm);
parent = RB_PARENT(elm);
color = RB_COLOR(elm);
if (child) {
RB_SET_PARENT(child, parent);
}
if (parent) {
if (RB_LEFT(parent) == elm) {
RB_SET_LEFT(parent, child);
} else {
RB_SET_RIGHT(parent, child);
}
} else {
head->SetRoot(child);
}
if (RB_PARENT(elm) == old) {
parent = elm;
}
elm->SetEntry(old->GetEntry());
if (RB_PARENT(old)) {
if (RB_LEFT(RB_PARENT(old)) == old) {
RB_SET_LEFT(RB_PARENT(old), elm);
} else {
RB_SET_RIGHT(RB_PARENT(old), elm);
}
} else {
head->SetRoot(elm);
}
RB_SET_PARENT(RB_LEFT(old), elm);
if (RB_RIGHT(old)) {
RB_SET_PARENT(RB_RIGHT(old), elm);
}
if (parent) {
left = parent;
}
return finalize();
}
parent = RB_PARENT(elm);
color = RB_COLOR(elm);
if (child) {
RB_SET_PARENT(child, parent);
}
if (parent) {
if (RB_LEFT(parent) == elm) {
RB_SET_LEFT(parent, child);
} else {
RB_SET_RIGHT(parent, child);
}
} else {
head->SetRoot(child);
}
return finalize();
}
// Inserts a node into the RB tree
template <typename Node, typename CompareFunction>
Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
Node* parent = nullptr;
Node* tmp = head->Root();
int comp = 0;
while (tmp) {
parent = tmp;
comp = cmp(elm, parent);
if (comp < 0) {
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
tmp = RB_RIGHT(tmp);
} else {
return tmp;
}
}
RB_SET(elm, parent);
if (parent != nullptr) {
if (comp < 0) {
RB_SET_LEFT(parent, elm);
} else {
RB_SET_RIGHT(parent, elm);
}
} else {
head->SetRoot(elm);
}
RB_INSERT_COLOR(head, elm);
return nullptr;
}
// Finds the node with the same key as elm
template <typename Node, typename CompareFunction>
Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
Node* tmp = head->Root();
while (tmp) {
const int comp = cmp(elm, tmp);
if (comp < 0) {
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
tmp = RB_RIGHT(tmp);
} else {
return tmp;
}
}
return nullptr;
}
// Finds the first node greater than or equal to the search key
template <typename Node, typename CompareFunction>
Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
Node* tmp = head->Root();
Node* res = nullptr;
while (tmp) {
const int comp = cmp(elm, tmp);
if (comp < 0) {
res = tmp;
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
tmp = RB_RIGHT(tmp);
} else {
return tmp;
}
}
return res;
}
// Finds the node with the same key as lelm
template <typename Node, typename CompareFunction>
Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
Node* tmp = head->Root();
while (tmp) {
const int comp = lcmp(lelm, tmp);
if (comp < 0) {
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
tmp = RB_RIGHT(tmp);
} else {
return tmp;
}
}
return nullptr;
}
// Finds the first node greater than or equal to the search key
template <typename Node, typename CompareFunction>
Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
Node* tmp = head->Root();
Node* res = nullptr;
while (tmp) {
const int comp = lcmp(lelm, tmp);
if (comp < 0) {
res = tmp;
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
tmp = RB_RIGHT(tmp);
} else {
return tmp;
}
}
return res;
}
template <typename Node>
Node* RB_NEXT(Node* elm) {
if (RB_RIGHT(elm)) {
elm = RB_RIGHT(elm);
while (RB_LEFT(elm)) {
elm = RB_LEFT(elm);
}
} else {
if (RB_PARENT(elm) && (elm == RB_LEFT(RB_PARENT(elm)))) {
elm = RB_PARENT(elm);
} else {
while (RB_PARENT(elm) && (elm == RB_RIGHT(RB_PARENT(elm)))) {
elm = RB_PARENT(elm);
}
elm = RB_PARENT(elm);
}
}
return elm;
}
template <typename Node>
Node* RB_PREV(Node* elm) {
if (RB_LEFT(elm)) {
elm = RB_LEFT(elm);
while (RB_RIGHT(elm)) {
elm = RB_RIGHT(elm);
}
} else {
if (RB_PARENT(elm) && (elm == RB_RIGHT(RB_PARENT(elm)))) {
elm = RB_PARENT(elm);
} else {
while (RB_PARENT(elm) && (elm == RB_LEFT(RB_PARENT(elm)))) {
elm = RB_PARENT(elm);
}
elm = RB_PARENT(elm);
}
}
return elm;
}
template <typename Node>
Node* RB_MINMAX(RBHead<Node>* head, bool is_min) {
Node* tmp = head->Root();
Node* parent = nullptr;
while (tmp) {
parent = tmp;
if (is_min) {
tmp = RB_LEFT(tmp);
} else {
tmp = RB_RIGHT(tmp);
}
}
return parent;
}
template <typename Node>
Node* RB_MIN(RBHead<Node>* head) {
return RB_MINMAX(head, true);
}
template <typename Node>
Node* RB_MAX(RBHead<Node>* head) {
return RB_MINMAX(head, false);
}
} // namespace Common
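A minimal usage sketch for these templates (the node type and comparator below are assumptions, not code from tree.h): a node stores an RBEntry and exposes the GetEntry/SetEntry accessors the helpers call, and the compare function returns negative/zero/positive.
struct IntNode {
    int key{};
    Common::RBEntry<IntNode> entry;
    Common::RBEntry<IntNode>& GetEntry() { return entry; }
    const Common::RBEntry<IntNode>& GetEntry() const { return entry; }
    void SetEntry(const Common::RBEntry<IntNode>& new_entry) { entry = new_entry; }
};
int Compare(const IntNode* lhs, const IntNode* rhs) {
    return (lhs->key > rhs->key) - (lhs->key < rhs->key);
}
void Example() {
    Common::RBHead<IntNode> head;
    IntNode a;
    IntNode b;
    a.key = 1;
    b.key = 2;
    Common::RB_INSERT(&head, &a, Compare);
    Common::RB_INSERT(&head, &b, Compare);
    IntNode* min = Common::RB_MIN(&head); // &a
    Common::RB_REMOVE(&head, min);
}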


@@ -14,8 +14,8 @@ constexpr u128 INVALID_UUID{{0, 0}};
struct UUID {
// UUIDs which are 0 are considered invalid!
u128 uuid = INVALID_UUID;
constexpr UUID() = default;
u128 uuid;
UUID() = default;
constexpr explicit UUID(const u128& id) : uuid{id} {}
constexpr explicit UUID(const u64 lo, const u64 hi) : uuid{{lo, hi}} {}


@@ -15,10 +15,12 @@ void FreeMemoryPages(void* base, std::size_t size) noexcept;
template <typename T>
class VirtualBuffer final {
public:
static_assert(
std::is_trivially_constructible_v<T>,
"T must be trivially constructible, as non-trivial constructors will not be executed "
"with the current allocator");
// TODO: Uncomment this and change Common::PageTable::PageInfo to be trivially constructible
// using std::atomic_ref once libc++ has support for it
// static_assert(
// std::is_trivially_constructible_v<T>,
// "T must be trivially constructible, as non-trivial constructors will not be executed "
// "with the current allocator");
constexpr VirtualBuffer() = default;
explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} {


@@ -2,19 +2,74 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <array>
#include <chrono>
#include <limits>
#include <mutex>
#include <thread>
#ifdef _MSC_VER
#include <intrin.h>
#pragma intrinsic(__umulh)
#pragma intrinsic(_udiv128)
#else
#include <x86intrin.h>
#endif
#include "common/atomic_ops.h"
#include "common/uint128.h"
#include "common/x64/native_clock.h"
namespace {
[[nodiscard]] u64 GetFixedPoint64Factor(u64 numerator, u64 divisor) {
#ifdef __SIZEOF_INT128__
const auto base = static_cast<unsigned __int128>(numerator) << 64ULL;
return static_cast<u64>(base / divisor);
#elif defined(_M_X64) || defined(_M_ARM64)
std::array<u64, 2> r = {0, numerator};
u64 remainder;
#if _MSC_VER < 1923
return udiv128(r[1], r[0], divisor, &remainder);
#else
return _udiv128(r[1], r[0], divisor, &remainder);
#endif
#else
// This one is a bit more inaccurate.
return MultiplyAndDivide64(std::numeric_limits<u64>::max(), numerator, divisor);
#endif
}
[[nodiscard]] u64 MultiplyHigh(u64 a, u64 b) {
#ifdef __SIZEOF_INT128__
return (static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b)) >> 64;
#elif defined(_M_X64) || defined(_M_ARM64)
return __umulh(a, b); // MSVC
#else
// Generic fallback
const u64 a_lo = u32(a);
const u64 a_hi = a >> 32;
const u64 b_lo = u32(b);
const u64 b_hi = b >> 32;
const u64 a_x_b_hi = a_hi * b_hi;
const u64 a_x_b_mid = a_hi * b_lo;
const u64 b_x_a_mid = b_hi * a_lo;
const u64 a_x_b_lo = a_lo * b_lo;
const u64 carry_bit = (static_cast<u64>(static_cast<u32>(a_x_b_mid)) +
static_cast<u64>(static_cast<u32>(b_x_a_mid)) + (a_x_b_lo >> 32)) >>
32;
const u64 multhi = a_x_b_hi + (a_x_b_mid >> 32) + (b_x_a_mid >> 32) + carry_bit;
return multhi;
#endif
}
} // namespace
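These helpers implement the standard fixed-point reciprocal trick: precompute factor = (numerator << 64) / divisor once, then value * numerator / divisor becomes a single high multiply, since MultiplyHigh(value, factor) = (value * factor) >> 64. A sketch of the intended use, mirroring what the constructor below precomputes (illustrative only):
u64 TicksToNs(u64 ticks, u64 rtsc_frequency) {
    // One up-front 128-bit division instead of one division per query
    const u64 factor = GetFixedPoint64Factor(1000000000, rtsc_frequency);
    // E.g. at rtsc_frequency = 2 GHz, factor ~= 2^63, so this halves the tick count
    return MultiplyHigh(ticks, factor);
}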
namespace Common {
u64 EstimateRDTSCFrequency() {
@@ -48,54 +103,71 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
: WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{
rtsc_frequency_} {
_mm_mfence();
last_measure = __rdtsc();
accumulated_ticks = 0U;
time_point.inner.last_measure = __rdtsc();
time_point.inner.accumulated_ticks = 0U;
ns_rtsc_factor = GetFixedPoint64Factor(1000000000, rtsc_frequency);
us_rtsc_factor = GetFixedPoint64Factor(1000000, rtsc_frequency);
ms_rtsc_factor = GetFixedPoint64Factor(1000, rtsc_frequency);
clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
}
u64 NativeClock::GetRTSC() {
std::scoped_lock scope{rtsc_serialize};
_mm_mfence();
const u64 current_measure = __rdtsc();
u64 diff = current_measure - last_measure;
diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
if (current_measure > last_measure) {
last_measure = current_measure;
}
accumulated_ticks += diff;
TimePoint new_time_point{};
TimePoint current_time_point{};
do {
current_time_point.pack = time_point.pack;
_mm_mfence();
const u64 current_measure = __rdtsc();
u64 diff = current_measure - current_time_point.inner.last_measure;
diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
new_time_point.inner.last_measure = current_measure > current_time_point.inner.last_measure
? current_measure
: current_time_point.inner.last_measure;
new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff;
} while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
current_time_point.pack));
/// The clock cannot be more precise than the guest timer, remove the lower bits
return accumulated_ticks & inaccuracy_mask;
return new_time_point.inner.accumulated_ticks & inaccuracy_mask;
}
void NativeClock::Pause(bool is_paused) {
if (!is_paused) {
_mm_mfence();
last_measure = __rdtsc();
TimePoint current_time_point{};
TimePoint new_time_point{};
do {
current_time_point.pack = time_point.pack;
new_time_point.pack = current_time_point.pack;
_mm_mfence();
new_time_point.inner.last_measure = __rdtsc();
} while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
current_time_point.pack));
}
}
std::chrono::nanoseconds NativeClock::GetTimeNS() {
const u64 rtsc_value = GetRTSC();
return std::chrono::nanoseconds{MultiplyAndDivide64(rtsc_value, 1000000000, rtsc_frequency)};
return std::chrono::nanoseconds{MultiplyHigh(rtsc_value, ns_rtsc_factor)};
}
std::chrono::microseconds NativeClock::GetTimeUS() {
const u64 rtsc_value = GetRTSC();
return std::chrono::microseconds{MultiplyAndDivide64(rtsc_value, 1000000, rtsc_frequency)};
return std::chrono::microseconds{MultiplyHigh(rtsc_value, us_rtsc_factor)};
}
std::chrono::milliseconds NativeClock::GetTimeMS() {
const u64 rtsc_value = GetRTSC();
return std::chrono::milliseconds{MultiplyAndDivide64(rtsc_value, 1000, rtsc_frequency)};
return std::chrono::milliseconds{MultiplyHigh(rtsc_value, ms_rtsc_factor)};
}
u64 NativeClock::GetClockCycles() {
const u64 rtsc_value = GetRTSC();
return MultiplyAndDivide64(rtsc_value, emulated_clock_frequency, rtsc_frequency);
return MultiplyHigh(rtsc_value, clock_rtsc_factor);
}
u64 NativeClock::GetCPUCycles() {
const u64 rtsc_value = GetRTSC();
return MultiplyAndDivide64(rtsc_value, emulated_cpu_frequency, rtsc_frequency);
return MultiplyHigh(rtsc_value, cpu_rtsc_factor);
}
} // namespace X64


@@ -6,7 +6,6 @@
#include <optional>
#include "common/spin_lock.h"
#include "common/wall_clock.h"
namespace Common {
@@ -32,14 +31,28 @@ public:
private:
u64 GetRTSC();
union alignas(16) TimePoint {
TimePoint() : pack{} {}
u128 pack{};
struct Inner {
u64 last_measure{};
u64 accumulated_ticks{};
} inner;
};
/// Value used to reduce the native clock's accuracy, as some apps rely on
/// undefined behavior where the level of accuracy in the clock shouldn't
/// be higher.
static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1);
SpinLock rtsc_serialize{};
u64 last_measure{};
u64 accumulated_ticks{};
TimePoint time_point;
// factors
u64 clock_rtsc_factor{};
u64 cpu_rtsc_factor{};
u64 ns_rtsc_factor{};
u64 us_rtsc_factor{};
u64 ms_rtsc_factor{};
u64 rtsc_frequency;
};
} // namespace X64
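The TimePoint union lets the pair of fields be read and written as one 128-bit value, so GetRTSC and Pause can use a compare-and-swap retry loop instead of the old SpinLock. Below is a simplified 64-bit analogue of the pattern using std::atomic; the real code packs two u64 fields into a u128 and uses Common::AtomicCompareAndSwap instead.
#include <atomic>
#include <cstdint>
std::atomic<std::uint64_t> packed{0}; // imagine two u32 fields packed together
void AccumulateTicks(std::uint32_t diff) {
    std::uint64_t current = packed.load();
    std::uint64_t desired;
    do {
        const std::uint32_t last_measure = static_cast<std::uint32_t>(current >> 32);
        const std::uint32_t accumulated = static_cast<std::uint32_t>(current);
        desired = (std::uint64_t{last_measure} << 32) | (accumulated + diff);
    } while (!packed.compare_exchange_weak(current, desired)); // retry if another thread won
}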


@@ -142,8 +142,6 @@ add_library(core STATIC
hardware_interrupt_manager.h
hle/ipc.h
hle/ipc_helpers.h
hle/kernel/address_arbiter.cpp
hle/kernel/address_arbiter.h
hle/kernel/client_port.cpp
hle/kernel/client_port.h
hle/kernel/client_session.cpp
@@ -157,13 +155,33 @@ add_library(core STATIC
hle/kernel/handle_table.h
hle/kernel/hle_ipc.cpp
hle/kernel/hle_ipc.h
hle/kernel/k_address_arbiter.cpp
hle/kernel/k_address_arbiter.h
hle/kernel/k_affinity_mask.h
hle/kernel/k_condition_variable.cpp
hle/kernel/k_condition_variable.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_light_condition_variable.h
hle/kernel/k_light_lock.cpp
hle/kernel/k_light_lock.h
hle/kernel/k_priority_queue.h
hle/kernel/k_readable_event.cpp
hle/kernel/k_readable_event.h
hle/kernel/k_resource_limit.cpp
hle/kernel/k_resource_limit.h
hle/kernel/k_scheduler.cpp
hle/kernel/k_scheduler.h
hle/kernel/k_scheduler_lock.h
hle/kernel/k_scoped_lock.h
hle/kernel/k_scoped_scheduler_lock_and_sleep.h
hle/kernel/k_synchronization_object.cpp
hle/kernel/k_synchronization_object.h
hle/kernel/k_thread.cpp
hle/kernel/k_thread.h
hle/kernel/k_thread_queue.h
hle/kernel/k_writable_event.cpp
hle/kernel/k_writable_event.h
hle/kernel/kernel.cpp
hle/kernel/kernel.h
hle/kernel/memory/address_space_info.cpp
@@ -183,8 +201,6 @@ add_library(core STATIC
hle/kernel/memory/slab_heap.h
hle/kernel/memory/system_control.cpp
hle/kernel/memory/system_control.h
hle/kernel/mutex.cpp
hle/kernel/mutex.h
hle/kernel/object.cpp
hle/kernel/object.h
hle/kernel/physical_core.cpp
@@ -194,10 +210,6 @@ add_library(core STATIC
hle/kernel/process.h
hle/kernel/process_capability.cpp
hle/kernel/process_capability.h
hle/kernel/readable_event.cpp
hle/kernel/readable_event.h
hle/kernel/resource_limit.cpp
hle/kernel/resource_limit.h
hle/kernel/server_port.cpp
hle/kernel/server_port.h
hle/kernel/server_session.cpp
@@ -210,20 +222,14 @@ add_library(core STATIC
hle/kernel/shared_memory.h
hle/kernel/svc.cpp
hle/kernel/svc.h
hle/kernel/svc_common.h
hle/kernel/svc_results.h
hle/kernel/svc_types.h
hle/kernel/svc_wrap.h
hle/kernel/synchronization_object.cpp
hle/kernel/synchronization_object.h
hle/kernel/synchronization.cpp
hle/kernel/synchronization.h
hle/kernel/thread.cpp
hle/kernel/thread.h
hle/kernel/time_manager.cpp
hle/kernel/time_manager.h
hle/kernel/transfer_memory.cpp
hle/kernel/transfer_memory.h
hle/kernel/writable_event.cpp
hle/kernel/writable_event.h
hle/lock.cpp
hle/lock.h
hle/result.h
@@ -400,8 +406,6 @@ add_library(core STATIC
hle/service/ldr/ldr.h
hle/service/lm/lm.cpp
hle/service/lm/lm.h
hle/service/lm/manager.cpp
hle/service/lm/manager.h
hle/service/mig/mig.cpp
hle/service/mig/mig.h
hle/service/mii/manager.cpp
@@ -635,16 +639,17 @@ if (MSVC)
/we4267
# 'context' : truncation from 'type1' to 'type2'
/we4305
# 'function' : not all control paths return a value
/we4715
)
else()
target_compile_options(core PRIVATE
-Werror=conversion
-Werror=ignored-qualifiers
-Werror=implicit-fallthrough
-Werror=reorder
-Werror=sign-compare
-Werror=unused-variable
$<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>


@@ -26,9 +26,10 @@ using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CO
/// Generic ARMv8 CPU interface
class ARM_Interface : NonCopyable {
public:
explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers, bool uses_wall_clock)
: system{system_}, interrupt_handlers{interrupt_handlers}, uses_wall_clock{
uses_wall_clock} {}
explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers_,
bool uses_wall_clock_)
: system{system_}, interrupt_handlers{interrupt_handlers_}, uses_wall_clock{
uses_wall_clock_} {}
virtual ~ARM_Interface() = default;
struct ThreadContext32 {


@@ -71,15 +71,9 @@ public:
}
void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
switch (exception) {
case Dynarmic::A32::Exception::UndefinedInstruction:
case Dynarmic::A32::Exception::UnpredictableInstruction:
break;
case Dynarmic::A32::Exception::Breakpoint:
break;
}
LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
LOG_CRITICAL(Core_ARM,
"ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
exception, pc, MemoryReadCode(pc), parent.IsInThumbMode());
UNIMPLEMENTED();
}
@@ -133,6 +127,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
page_table.pointers.data());
config.absolute_offset_page_table = true;
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
config.only_detect_misalignment_via_page_table_on_page_boundary = true;
@@ -180,6 +175,9 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
}
if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
}
}
return std::make_unique<Dynarmic::A32::Jit>(config);
@@ -258,6 +256,9 @@ void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
}
void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
if (!jit) {
return;
}
Dynarmic::A32::Context context;
jit->SaveContext(context);
ctx.cpu_registers = context.Regs();
@@ -267,6 +268,9 @@ void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
}
void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
if (!jit) {
return;
}
Dynarmic::A32::Context context;
context.Regs() = ctx.cpu_registers;
context.ExtRegs() = ctx.extension_registers;


@@ -50,6 +50,10 @@ public:
u64 GetTPIDR_EL0() const override;
void ChangeProcessorID(std::size_t new_core_id) override;
bool IsInThumbMode() const {
return (GetPSTATE() & 0x20) != 0;
}
void SaveContext(ThreadContext32& ctx) override;
void SaveContext(ThreadContext64& ctx) override {}
void LoadContext(const ThreadContext32& ctx) override;


@@ -152,6 +152,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
// Memory
config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
config.page_table_address_space_bits = address_space_bits;
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
config.silently_mirror_page_table = false;
config.absolute_offset_page_table = true;
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
@@ -211,6 +212,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
}
if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
}
}
return std::make_shared<Dynarmic::A64::Jit>(config);
@@ -290,6 +294,9 @@ void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
}
void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
if (!jit) {
return;
}
ctx.cpu_registers = jit->GetRegisters();
ctx.sp = jit->GetSP();
ctx.pc = jit->GetPC();
@@ -301,6 +308,9 @@ void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
}
void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
if (!jit) {
return;
}
jit->SetRegisters(ctx.cpu_registers);
jit->SetSP(ctx.sp);
jit->SetPC(ctx.pc);


@@ -28,15 +28,14 @@
#include "core/hardware_interrupt_manager.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/am/applets/applets.h"
#include "core/hle/service/apm/controller.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/glue/manager.h"
#include "core/hle/service/lm/manager.h"
#include "core/hle/service/service.h"
#include "core/hle/service/sm/sm.h"
#include "core/hle/service/time/time_manager.h"
@@ -293,8 +292,6 @@ struct System::Impl {
perf_stats->GetMeanFrametime());
}
lm_manager.Flush();
is_powered_on = false;
exit_lock = false;
@@ -398,7 +395,6 @@ struct System::Impl {
/// Service State
Service::Glue::ARPManager arp_manager;
Service::LM::Manager lm_manager{reporter};
Service::Time::TimeManager time_manager;
/// Service manager
@@ -720,14 +716,6 @@ const Service::APM::Controller& System::GetAPMController() const {
return impl->apm_controller;
}
Service::LM::Manager& System::GetLogManager() {
return impl->lm_manager;
}
const Service::LM::Manager& System::GetLogManager() const {
return impl->lm_manager;
}
Service::Time::TimeManager& System::GetTimeManager() {
return impl->time_manager;
}


@@ -62,10 +62,6 @@ namespace Glue {
class ARPManager;
}
namespace LM {
class Manager;
} // namespace LM
namespace SM {
class ServiceManager;
} // namespace SM
@@ -351,9 +347,6 @@ public:
[[nodiscard]] Service::APM::Controller& GetAPMController();
[[nodiscard]] const Service::APM::Controller& GetAPMController() const;
[[nodiscard]] Service::LM::Manager& GetLogManager();
[[nodiscard]] const Service::LM::Manager& GetLogManager() const;
[[nodiscard]] Service::Time::TimeManager& GetTimeManager();
[[nodiscard]] const Service::Time::TimeManager& GetTimeManager() const;


@@ -49,6 +49,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
instance.on_thread_init();
instance.ThreadLoop();
MicroProfileOnThreadExit();
}
void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {


@@ -11,9 +11,9 @@
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/thread.h"
#include "video_core/gpu.h"
namespace Core {
@@ -147,7 +147,7 @@ void CpuManager::MultiCoreRunSuspendThread() {
while (true) {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::Thread* current_thread = scheduler.GetCurrentThread();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
@@ -208,7 +208,6 @@ void CpuManager::SingleCoreRunGuestThread() {
void CpuManager::SingleCoreRunGuestLoop() {
auto& kernel = system.Kernel();
auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
while (true) {
auto* physical_core = &kernel.CurrentPhysicalCore();
system.EnterDynarmicProfile();
@@ -217,9 +216,9 @@ void CpuManager::SingleCoreRunGuestLoop() {
physical_core = &kernel.CurrentPhysicalCore();
}
system.ExitDynarmicProfile();
thread->SetPhantomMode(true);
kernel.SetIsPhantomModeForSingleCore(true);
system.CoreTiming().Advance();
thread->SetPhantomMode(false);
kernel.SetIsPhantomModeForSingleCore(false);
physical_core->ArmInterface().ClearExclusiveState();
PreemptSingleCore();
auto& scheduler = kernel.Scheduler(current_core);
@@ -245,7 +244,7 @@ void CpuManager::SingleCoreRunSuspendThread() {
while (true) {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::Thread* current_thread = scheduler.GetCurrentThread();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
@@ -255,22 +254,23 @@ void CpuManager::SingleCoreRunSuspendThread() {
void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
{
auto& scheduler = system.Kernel().Scheduler(current_core);
Kernel::Thread* current_thread = scheduler.GetCurrentThread();
auto& kernel = system.Kernel();
auto& scheduler = kernel.Scheduler(current_core);
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
if (idle_count >= 4 || from_running_enviroment) {
if (!from_running_enviroment) {
system.CoreTiming().Idle();
idle_count = 0;
}
current_thread->SetPhantomMode(true);
kernel.SetIsPhantomModeForSingleCore(true);
system.CoreTiming().Advance();
current_thread->SetPhantomMode(false);
kernel.SetIsPhantomModeForSingleCore(false);
}
current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
system.CoreTiming().ResetTicks();
scheduler.Unload(scheduler.GetCurrentThread());
auto& next_scheduler = system.Kernel().Scheduler(current_core);
auto& next_scheduler = kernel.Scheduler(current_core);
Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
}
@@ -278,8 +278,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
{
auto& scheduler = system.Kernel().Scheduler(current_core);
scheduler.Reload(scheduler.GetCurrentThread());
auto* currrent_thread2 = scheduler.GetCurrentThread();
if (!currrent_thread2->IsIdleThread()) {
if (!scheduler.IsIdle()) {
idle_count = 0;
}
}


@@ -143,6 +143,7 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
return 0x3C;
}
UNREACHABLE();
return 0;
}
u64 GetSignatureTypePaddingSize(SignatureType type) {
@@ -157,6 +158,7 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
return 0x40;
}
UNREACHABLE();
return 0;
}
SignatureType Ticket::GetSignatureType() const {
@@ -169,8 +171,7 @@ SignatureType Ticket::GetSignatureType() const {
if (const auto* ticket = std::get_if<ECDSATicket>(&data)) {
return ticket->sig_type;
}
UNREACHABLE();
throw std::bad_variant_access{};
}
TicketData& Ticket::GetData() {
@@ -183,8 +184,7 @@ TicketData& Ticket::GetData() {
if (auto* ticket = std::get_if<ECDSATicket>(&data)) {
return ticket->data;
}
UNREACHABLE();
throw std::bad_variant_access{};
}
const TicketData& Ticket::GetData() const {
@@ -197,8 +197,7 @@ const TicketData& Ticket::GetData() const {
if (const auto* ticket = std::get_if<ECDSATicket>(&data)) {
return ticket->data;
}
UNREACHABLE();
throw std::bad_variant_access{};
}
u64 Ticket::GetSize() const {
@@ -569,6 +568,11 @@ KeyManager::KeyManager() {
// Initialize keys
const std::string hactool_keys_dir = Common::FS::GetHactoolConfigurationPath();
const std::string yuzu_keys_dir = Common::FS::GetUserPath(Common::FS::UserPath::KeysDir);
if (!Common::FS::Exists(yuzu_keys_dir)) {
Common::FS::CreateDir(yuzu_keys_dir);
}
if (Settings::values.use_dev_keys) {
dev_mode = true;
AttemptLoadKeyFile(yuzu_keys_dir, hactool_keys_dir, "dev.keys", false);
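The fix above creates the keys directory before any key file is read or written. An equivalent standalone pattern using std::filesystem (yuzu uses its own Common::FS wrappers, so this is a sketch of the idea rather than the project's code):

#include <filesystem>
#include <system_error>

// Create the directory (and any missing parents) if it does not exist;
// errors are reported through the error_code instead of exceptions.
void EnsureKeysDirectory(const std::filesystem::path& dir) {
    std::error_code ec;
    if (!std::filesystem::exists(dir, ec)) {
        std::filesystem::create_directories(dir, ec);
    }
}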

View File

@@ -43,17 +43,17 @@ static_assert(sizeof(IVFCLevel) == 0x18, "IVFCLevel has incorrect size.");
struct IVFCHeader {
u32_le magic;
u32_le magic_number;
INSERT_UNION_PADDING_BYTES(8);
INSERT_PADDING_BYTES_NOINIT(8);
std::array<IVFCLevel, 6> levels;
INSERT_UNION_PADDING_BYTES(64);
INSERT_PADDING_BYTES_NOINIT(64);
};
static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size.");
struct NCASectionHeaderBlock {
INSERT_UNION_PADDING_BYTES(3);
INSERT_PADDING_BYTES_NOINIT(3);
NCASectionFilesystemType filesystem_type;
NCASectionCryptoType crypto_type;
INSERT_UNION_PADDING_BYTES(3);
INSERT_PADDING_BYTES_NOINIT(3);
};
static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size.");
@@ -61,7 +61,7 @@ struct NCASectionRaw {
NCASectionHeaderBlock header;
std::array<u8, 0x138> block_data;
std::array<u8, 0x8> section_ctr;
INSERT_UNION_PADDING_BYTES(0xB8);
INSERT_PADDING_BYTES_NOINIT(0xB8);
};
static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size.");
@@ -69,19 +69,19 @@ struct PFS0Superblock {
NCASectionHeaderBlock header_block;
std::array<u8, 0x20> hash;
u32_le size;
INSERT_UNION_PADDING_BYTES(4);
INSERT_PADDING_BYTES_NOINIT(4);
u64_le hash_table_offset;
u64_le hash_table_size;
u64_le pfs0_header_offset;
u64_le pfs0_size;
INSERT_UNION_PADDING_BYTES(0x1B0);
INSERT_PADDING_BYTES_NOINIT(0x1B0);
};
static_assert(sizeof(PFS0Superblock) == 0x200, "PFS0Superblock has incorrect size.");
struct RomFSSuperblock {
NCASectionHeaderBlock header_block;
IVFCHeader ivfc;
INSERT_UNION_PADDING_BYTES(0x118);
INSERT_PADDING_BYTES_NOINIT(0x118);
};
static_assert(sizeof(RomFSSuperblock) == 0x200, "RomFSSuperblock has incorrect size.");
@@ -89,19 +89,19 @@ struct BKTRHeader {
u64_le offset;
u64_le size;
u32_le magic;
INSERT_UNION_PADDING_BYTES(0x4);
INSERT_PADDING_BYTES_NOINIT(0x4);
u32_le number_entries;
INSERT_UNION_PADDING_BYTES(0x4);
INSERT_PADDING_BYTES_NOINIT(0x4);
};
static_assert(sizeof(BKTRHeader) == 0x20, "BKTRHeader has incorrect size.");
struct BKTRSuperblock {
NCASectionHeaderBlock header_block;
IVFCHeader ivfc;
INSERT_UNION_PADDING_BYTES(0x18);
INSERT_PADDING_BYTES_NOINIT(0x18);
BKTRHeader relocation;
BKTRHeader subsection;
INSERT_UNION_PADDING_BYTES(0xC0);
INSERT_PADDING_BYTES_NOINIT(0xC0);
};
static_assert(sizeof(BKTRSuperblock) == 0x200, "BKTRSuperblock has incorrect size.");

View File

@@ -51,8 +51,8 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
low = mid + 1;
}
}
UNREACHABLE_MSG("Offset could not be found in BKTR block.");
return {0, 0};
}
} // Anonymous namespace

View File

@@ -105,7 +105,8 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
// TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
return ContentRecordType::HtmlDocument;
default:
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", static_cast<u8>(type));
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type);
return ContentRecordType{};
}
}

View File

@@ -67,18 +67,18 @@ public:
virtual void Refresh() = 0;
virtual bool HasEntry(u64 title_id, ContentRecordType type) const = 0;
virtual bool HasEntry(ContentProviderEntry entry) const;
bool HasEntry(ContentProviderEntry entry) const;
virtual std::optional<u32> GetEntryVersion(u64 title_id) const = 0;
virtual VirtualFile GetEntryUnparsed(u64 title_id, ContentRecordType type) const = 0;
virtual VirtualFile GetEntryUnparsed(ContentProviderEntry entry) const;
VirtualFile GetEntryUnparsed(ContentProviderEntry entry) const;
virtual VirtualFile GetEntryRaw(u64 title_id, ContentRecordType type) const = 0;
virtual VirtualFile GetEntryRaw(ContentProviderEntry entry) const;
VirtualFile GetEntryRaw(ContentProviderEntry entry) const;
virtual std::unique_ptr<NCA> GetEntry(u64 title_id, ContentRecordType type) const = 0;
virtual std::unique_ptr<NCA> GetEntry(ContentProviderEntry entry) const;
std::unique_ptr<NCA> GetEntry(ContentProviderEntry entry) const;
virtual std::vector<ContentProviderEntry> ListEntries() const;

View File

@@ -58,7 +58,7 @@ struct SaveDataAttribute {
SaveDataType type;
SaveDataRank rank;
u16 index;
INSERT_PADDING_BYTES(4);
INSERT_PADDING_BYTES_NOINIT(4);
u64 zero_1;
u64 zero_2;
u64 zero_3;
@@ -72,7 +72,7 @@ struct SaveDataExtraData {
u64 owner_id;
s64 timestamp;
SaveDataFlags flags;
INSERT_PADDING_BYTES(4);
INSERT_PADDING_BYTES_NOINIT(4);
s64 available_size;
s64 journal_size;
s64 commit_id;

View File

@@ -133,8 +133,11 @@ VirtualFile RealVfsFilesystem::MoveFile(std::string_view old_path_, std::string_
}
cache.erase(old_path);
file->Open(new_path, "r+b");
cache.insert_or_assign(new_path, std::move(file));
if (file->Open(new_path, "r+b")) {
cache.insert_or_assign(new_path, std::move(file));
} else {
LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", new_path);
}
} else {
UNREACHABLE();
return nullptr;
@@ -214,9 +217,12 @@ VirtualDir RealVfsFilesystem::MoveDirectory(std::string_view old_path_,
}
auto file = cached.lock();
file->Open(file_new_path, "r+b");
cache.erase(file_old_path);
cache.insert_or_assign(std::move(file_new_path), std::move(file));
if (file->Open(file_new_path, "r+b")) {
cache.insert_or_assign(std::move(file_new_path), std::move(file));
} else {
LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", file_new_path);
}
}
return OpenDirectory(new_path, Mode::ReadWrite);

View File

@@ -21,21 +21,18 @@ public:
std::mutex mutex;
bool touch_pressed = false; ///< True if touchpad area is currently pressed, otherwise false
float touch_x = 0.0f; ///< Touchpad X-position
float touch_y = 0.0f; ///< Touchpad Y-position
Input::TouchStatus status;
private:
class Device : public Input::TouchDevice {
public:
explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {}
std::tuple<float, float, bool> GetStatus() const override {
Input::TouchStatus GetStatus() const override {
if (auto state = touch_state.lock()) {
std::lock_guard guard{state->mutex};
return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed);
return state->status;
}
return std::make_tuple(0.0f, 0.0f, false);
return {};
}
private:
@@ -79,36 +76,44 @@ std::tuple<unsigned, unsigned> EmuWindow::ClipToTouchScreen(unsigned new_x, unsi
return std::make_tuple(new_x, new_y);
}
void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id) {
if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) {
return;
}
if (id >= touch_state->status.size()) {
return;
}
std::lock_guard guard{touch_state->mutex};
touch_state->touch_x =
const float x =
static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) /
static_cast<float>(framebuffer_layout.screen.right - framebuffer_layout.screen.left);
touch_state->touch_y =
const float y =
static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) /
static_cast<float>(framebuffer_layout.screen.bottom - framebuffer_layout.screen.top);
touch_state->touch_pressed = true;
touch_state->status[id] = std::make_tuple(x, y, true);
}
void EmuWindow::TouchReleased() {
void EmuWindow::TouchReleased(std::size_t id) {
if (id >= touch_state->status.size()) {
return;
}
std::lock_guard guard{touch_state->mutex};
touch_state->touch_pressed = false;
touch_state->touch_x = 0;
touch_state->touch_y = 0;
touch_state->status[id] = std::make_tuple(0.0f, 0.0f, false);
}
void EmuWindow::TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y) {
if (!touch_state->touch_pressed)
void EmuWindow::TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id) {
if (id >= touch_state->status.size()) {
return;
}
if (!std::get<2>(touch_state->status[id]))
return;
if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
std::tie(framebuffer_x, framebuffer_y) = ClipToTouchScreen(framebuffer_x, framebuffer_y);
TouchPressed(framebuffer_x, framebuffer_y);
TouchPressed(framebuffer_x, framebuffer_y, id);
}
void EmuWindow::UpdateCurrentFramebufferLayout(unsigned width, unsigned height) {
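TouchPressed above normalizes an absolute framebuffer coordinate into the 0.0 - 1.0 touchscreen range before storing it in the per-id slot. The same arithmetic as a self-contained sketch, where Rect is a stand-in for the layout's screen rectangle rather than yuzu's Layout type:

#include <utility>

// Stand-in for framebuffer_layout.screen.
struct Rect {
    unsigned left, top, right, bottom;
};

// Map an absolute framebuffer position into [0.0, 1.0] relative to the
// emulated screen rectangle, mirroring TouchPressed above.
std::pair<float, float> NormalizeTouch(unsigned x, unsigned y, const Rect& screen) {
    const float nx = static_cast<float>(x - screen.left) /
                     static_cast<float>(screen.right - screen.left);
    const float ny = static_cast<float>(y - screen.top) /
                     static_cast<float>(screen.bottom - screen.top);
    return {nx, ny};
}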

View File

@@ -117,18 +117,23 @@ public:
* Signal that a touch pressed event has occurred (e.g. mouse click pressed)
* @param framebuffer_x Framebuffer x-coordinate that was pressed
* @param framebuffer_y Framebuffer y-coordinate that was pressed
* @param id Touch event ID
*/
void TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y);
void TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id);
/// Signal that a touch released event has occurred (e.g. mouse click released)
void TouchReleased();
/**
* Signal that a touch released event has occurred (e.g. mouse click released)
* @param id Touch event ID
*/
void TouchReleased(std::size_t id);
/**
* Signal that a touch movement event has occurred (e.g. mouse was moved over the emu window)
* @param framebuffer_x Framebuffer x-coordinate
* @param framebuffer_y Framebuffer y-coordinate
* @param id Touch event ID
*/
void TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y);
void TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id);
/**
* Returns currently active configuration.

View File

@@ -163,10 +163,11 @@ using MotionStatus = std::tuple<Common::Vec3<float>, Common::Vec3<float>, Common
using MotionDevice = InputDevice<MotionStatus>;
/**
* A touch status is an object that returns a tuple of two floats and a bool. The floats are
* x and y coordinates in the range 0.0 - 1.0, and the bool indicates whether it is pressed.
* A touch status is an object that returns an array of 16 touch entries, each a tuple of two
* floats and a bool. The floats are x and y coordinates in the range 0.0 - 1.0, and the bool
* indicates whether the touch is pressed.
*/
using TouchStatus = std::tuple<float, float, bool>;
using TouchStatus = std::array<std::tuple<float, float, bool>, 16>;
/**
* A touch device is an input device that returns a touch status object
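The widened TouchStatus carries 16 simultaneous touch points instead of one. A standalone sketch of how callers index it, mirroring the bounds checks added to EmuWindow above:

#include <array>
#include <cstddef>
#include <tuple>

using TouchStatus = std::array<std::tuple<float, float, bool>, 16>;

// A touch id selects one slot; out-of-range ids are treated as not
// pressed, matching the early-outs in TouchPressed/TouchReleased/TouchMoved.
bool IsTouchPressed(const TouchStatus& status, std::size_t id) {
    return id < status.size() && std::get<2>(status[id]);
}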

View File

@@ -25,6 +25,10 @@ void InputInterpreter::PollInput() {
button_states[current_index] = button_state;
}
bool InputInterpreter::IsButtonPressed(HIDButton button) const {
return (button_states[current_index] & (1U << static_cast<u8>(button))) != 0;
}
bool InputInterpreter::IsButtonPressedOnce(HIDButton button) const {
const bool current_press =
(button_states[current_index] & (1U << static_cast<u8>(button))) != 0;

View File

@@ -66,6 +66,27 @@ public:
/// Gets a button state from HID and inserts it into the array of button states.
void PollInput();
/**
* Checks whether the button is pressed.
*
* @param button The button to check.
*
* @returns True when the button is pressed.
*/
[[nodiscard]] bool IsButtonPressed(HIDButton button) const;
/**
* Checks whether any of the buttons in the parameter list is pressed.
*
* @tparam T The buttons to check.
*
* @returns True when at least one of the buttons is pressed.
*/
template <HIDButton... T>
[[nodiscard]] bool IsAnyButtonPressed() const {
return (IsButtonPressed(T) || ...);
}
/**
* The specified button is considered to be pressed once
* if it is currently pressed and not pressed previously.
@@ -79,12 +100,12 @@ public:
/**
* Checks whether any of the buttons in the parameter list is pressed once.
*
* @tparam HIDButton The buttons to check.
* @tparam T The buttons to check.
*
* @returns True when at least one of the buttons is pressed once.
*/
template <HIDButton... T>
[[nodiscard]] bool IsAnyButtonPressedOnce() {
[[nodiscard]] bool IsAnyButtonPressedOnce() const {
return (IsButtonPressedOnce(T) || ...);
}
@@ -100,12 +121,12 @@ public:
/**
* Checks whether any of the buttons in the parameter list is held down.
*
* @tparam HIDButton The buttons to check.
* @tparam T The buttons to check.
*
* @returns True when at least one of the buttons is held down.
*/
template <HIDButton... T>
[[nodiscard]] bool IsAnyButtonHeld() {
[[nodiscard]] bool IsAnyButtonHeld() const {
return (IsButtonHeld(T) || ...);
}
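IsAnyButtonPressed and its siblings fold a non-type template parameter pack with ||, a C++17 fold expression. A self-contained sketch of the pattern, using a reduced button enum for illustration:

#include <cstdint>

enum class HIDButton : std::uint8_t { A = 0, B = 1, X = 2, Y = 3 };

// One bit per button in the packed button state, as in PollInput above.
constexpr bool IsButtonPressed(std::uint32_t state, HIDButton button) {
    return (state & (1U << static_cast<std::uint8_t>(button))) != 0;
}

// Fold over the parameter pack with ||: true if any listed button is set.
template <HIDButton... T>
constexpr bool IsAnyButtonPressed(std::uint32_t state) {
    return (IsButtonPressed(state, T) || ...);
}

static_assert(IsAnyButtonPressed<HIDButton::A, HIDButton::X>(0b0100)); // X pressed
static_assert(!IsAnyButtonPressed<HIDButton::B>(0b0100));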

View File

@@ -4,8 +4,10 @@
#pragma once
#include <array>
#include <tuple>
#include "common/bit_util.h"
#include "common/common_types.h"
namespace Core {
@@ -18,34 +20,12 @@ constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch cpu frequency is 1020MHz u
constexpr u64 CNTFREQ = 19200000; // Switch's hardware clock speed
constexpr u32 NUM_CPU_CORES = 4; // Number of CPU Cores
} // namespace Hardware
constexpr u32 INVALID_HOST_THREAD_ID = 0xFFFFFFFF;
struct EmuThreadHandle {
u32 host_handle;
u32 guest_handle;
u64 GetRaw() const {
return (static_cast<u64>(host_handle) << 32) | guest_handle;
}
bool operator==(const EmuThreadHandle& rhs) const {
return std::tie(host_handle, guest_handle) == std::tie(rhs.host_handle, rhs.guest_handle);
}
bool operator!=(const EmuThreadHandle& rhs) const {
return !operator==(rhs);
}
static constexpr EmuThreadHandle InvalidHandle() {
constexpr u32 invalid_handle = 0xFFFFFFFF;
return {invalid_handle, invalid_handle};
}
bool IsInvalid() const {
return (*this) == InvalidHandle();
}
// Virtual to Physical core map.
constexpr std::array<s32, Common::BitSize<u64>()> VirtualToPhysicalCoreMap{
0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
};
} // namespace Hardware
} // namespace Core

View File

@@ -146,7 +146,7 @@ static_assert(sizeof(BufferDescriptorC) == 8, "BufferDescriptorC size is incorre
struct DataPayloadHeader {
u32_le magic;
INSERT_PADDING_WORDS(1);
INSERT_PADDING_WORDS_NOINIT(1);
};
static_assert(sizeof(DataPayloadHeader) == 8, "DataPayloadHeader size is incorrect");
@@ -160,7 +160,7 @@ struct DomainMessageHeader {
// Used when responding to an IPC request, Server -> Client.
struct {
u32_le num_objects;
INSERT_UNION_PADDING_WORDS(3);
INSERT_PADDING_WORDS_NOINIT(3);
};
// Used when performing an IPC request, Client -> Server.
@@ -171,10 +171,10 @@ struct DomainMessageHeader {
BitField<16, 16, u32> size;
};
u32_le object_id;
INSERT_UNION_PADDING_WORDS(2);
INSERT_PADDING_WORDS_NOINIT(2);
};
std::array<u32, 4> raw{};
std::array<u32, 4> raw;
};
};
static_assert(sizeof(DomainMessageHeader) == 16, "DomainMessageHeader size is incorrect");

View File

@@ -1,317 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <vector>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
namespace Kernel {
// Wake up num_to_wake (or all) threads in a vector.
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
s32 num_to_wake) {
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
// them all.
std::size_t last = waiting_threads.size();
if (num_to_wake > 0) {
last = std::min(last, static_cast<std::size_t>(num_to_wake));
}
// Signal the waiting threads.
for (std::size_t i = 0; i < last; i++) {
waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
RemoveThread(waiting_threads[i]);
waiting_threads[i]->WaitForArbitration(false);
waiting_threads[i]->ResumeFromWait();
}
}
AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
AddressArbiter::~AddressArbiter() = default;
ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
s32 num_to_wake) {
switch (type) {
case SignalType::Signal:
return SignalToAddressOnly(address, num_to_wake);
case SignalType::IncrementAndSignalIfEqual:
return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
case SignalType::ModifyByWaitingCountAndSignalIfEqual:
return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
default:
return ERR_INVALID_ENUM_VALUE;
}
}
ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
KScopedSchedulerLock lock(system.Kernel());
const std::vector<std::shared_ptr<Thread>> waiting_threads =
GetThreadsWaitingOnAddress(address);
WakeThreads(waiting_threads, num_to_wake);
return RESULT_SUCCESS;
}
ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake) {
KScopedSchedulerLock lock(system.Kernel());
auto& memory = system.Memory();
// Ensure that we can write to the address.
if (!memory.IsValidVirtualAddress(address)) {
return ERR_INVALID_ADDRESS_STATE;
}
const std::size_t current_core = system.CurrentCoreIndex();
auto& monitor = system.Monitor();
u32 current_value;
do {
current_value = monitor.ExclusiveRead32(current_core, address);
if (current_value != static_cast<u32>(value)) {
return ERR_INVALID_STATE;
}
current_value++;
} while (!monitor.ExclusiveWrite32(current_core, address, current_value));
return SignalToAddressOnly(address, num_to_wake);
}
ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake) {
KScopedSchedulerLock lock(system.Kernel());
auto& memory = system.Memory();
// Ensure that we can write to the address.
if (!memory.IsValidVirtualAddress(address)) {
return ERR_INVALID_ADDRESS_STATE;
}
// Get threads waiting on the address.
const std::vector<std::shared_ptr<Thread>> waiting_threads =
GetThreadsWaitingOnAddress(address);
const std::size_t current_core = system.CurrentCoreIndex();
auto& monitor = system.Monitor();
s32 updated_value;
do {
updated_value = monitor.ExclusiveRead32(current_core, address);
if (updated_value != value) {
return ERR_INVALID_STATE;
}
// Determine the modified value depending on the waiting count.
if (num_to_wake <= 0) {
if (waiting_threads.empty()) {
updated_value = value + 1;
} else {
updated_value = value - 1;
}
} else {
if (waiting_threads.empty()) {
updated_value = value + 1;
} else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
updated_value = value - 1;
} else {
updated_value = value;
}
}
} while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
WakeThreads(waiting_threads, num_to_wake);
return RESULT_SUCCESS;
}
ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
s64 timeout_ns) {
switch (type) {
case ArbitrationType::WaitIfLessThan:
return WaitForAddressIfLessThan(address, value, timeout_ns, false);
case ArbitrationType::DecrementAndWaitIfLessThan:
return WaitForAddressIfLessThan(address, value, timeout_ns, true);
case ArbitrationType::WaitIfEqual:
return WaitForAddressIfEqual(address, value, timeout_ns);
default:
return ERR_INVALID_ENUM_VALUE;
}
}
ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
bool should_decrement) {
auto& memory = system.Memory();
auto& kernel = system.Kernel();
Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle event_handle = InvalidHandle;
{
KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
if (current_thread->IsPendingTermination()) {
lock.CancelSleep();
return ERR_THREAD_TERMINATING;
}
// Ensure that we can read the address.
if (!memory.IsValidVirtualAddress(address)) {
lock.CancelSleep();
return ERR_INVALID_ADDRESS_STATE;
}
s32 current_value = static_cast<s32>(memory.Read32(address));
if (current_value >= value) {
lock.CancelSleep();
return ERR_INVALID_STATE;
}
current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
s32 decrement_value;
const std::size_t current_core = system.CurrentCoreIndex();
auto& monitor = system.Monitor();
do {
current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
if (should_decrement) {
decrement_value = current_value - 1;
} else {
decrement_value = current_value;
}
} while (
!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
// Short-circuit without rescheduling, if timeout is zero.
if (timeout == 0) {
lock.CancelSleep();
return RESULT_TIMEOUT;
}
current_thread->SetArbiterWaitAddress(address);
InsertThread(SharedFrom(current_thread));
current_thread->SetStatus(ThreadStatus::WaitArb);
current_thread->WaitForArbitration(true);
}
if (event_handle != InvalidHandle) {
auto& time_manager = kernel.TimeManager();
time_manager.UnscheduleTimeEvent(event_handle);
}
{
KScopedSchedulerLock lock(kernel);
if (current_thread->IsWaitingForArbitration()) {
RemoveThread(SharedFrom(current_thread));
current_thread->WaitForArbitration(false);
}
}
return current_thread->GetSignalingResult();
}
ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
auto& memory = system.Memory();
auto& kernel = system.Kernel();
Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
Handle event_handle = InvalidHandle;
{
KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
if (current_thread->IsPendingTermination()) {
lock.CancelSleep();
return ERR_THREAD_TERMINATING;
}
// Ensure that we can read the address.
if (!memory.IsValidVirtualAddress(address)) {
lock.CancelSleep();
return ERR_INVALID_ADDRESS_STATE;
}
s32 current_value = static_cast<s32>(memory.Read32(address));
if (current_value != value) {
lock.CancelSleep();
return ERR_INVALID_STATE;
}
// Short-circuit without rescheduling, if timeout is zero.
if (timeout == 0) {
lock.CancelSleep();
return RESULT_TIMEOUT;
}
current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
current_thread->SetArbiterWaitAddress(address);
InsertThread(SharedFrom(current_thread));
current_thread->SetStatus(ThreadStatus::WaitArb);
current_thread->WaitForArbitration(true);
}
if (event_handle != InvalidHandle) {
auto& time_manager = kernel.TimeManager();
time_manager.UnscheduleTimeEvent(event_handle);
}
{
KScopedSchedulerLock lock(kernel);
if (current_thread->IsWaitingForArbitration()) {
RemoveThread(SharedFrom(current_thread));
current_thread->WaitForArbitration(false);
}
}
return current_thread->GetSignalingResult();
}
void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
const VAddr arb_addr = thread->GetArbiterWaitAddress();
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
const auto iter =
std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
return entry->GetPriority() >= thread->GetPriority();
});
if (iter == thread_list.cend()) {
thread_list.push_back(std::move(thread));
} else {
thread_list.insert(iter, std::move(thread));
}
}
void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
const VAddr arb_addr = thread->GetArbiterWaitAddress();
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
[&thread](const auto& entry) { return thread == entry; });
if (iter != thread_list.cend()) {
thread_list.erase(iter);
}
}
std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
VAddr address) const {
const auto iter = arb_threads.find(address);
if (iter == arb_threads.cend()) {
return {};
}
const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
return {thread_list.cbegin(), thread_list.cend()};
}
} // namespace Kernel
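The removed InsertThread kept each per-address wait list sorted by thread priority. The ordering trick as a standalone sketch over plain integers, where a lower value means a higher priority, as in the kernel:

#include <algorithm>
#include <list>

// Insert before the first entry whose priority value is >= the newcomer's,
// keeping the list sorted ascending; the >= comparison also places the
// newcomer ahead of entries that share its priority, as the removed code did.
void InsertByPriority(std::list<int>& wait_list, int priority) {
    const auto iter = std::find_if(wait_list.cbegin(), wait_list.cend(),
                                   [priority](int entry) { return entry >= priority; });
    wait_list.insert(iter, priority);
}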

View File

@@ -1,91 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <list>
#include <memory>
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
union ResultCode;
namespace Core {
class System;
}
namespace Kernel {
class Thread;
class AddressArbiter {
public:
enum class ArbitrationType {
WaitIfLessThan = 0,
DecrementAndWaitIfLessThan = 1,
WaitIfEqual = 2,
};
enum class SignalType {
Signal = 0,
IncrementAndSignalIfEqual = 1,
ModifyByWaitingCountAndSignalIfEqual = 2,
};
explicit AddressArbiter(Core::System& system);
~AddressArbiter();
AddressArbiter(const AddressArbiter&) = delete;
AddressArbiter& operator=(const AddressArbiter&) = delete;
AddressArbiter(AddressArbiter&&) = default;
AddressArbiter& operator=(AddressArbiter&&) = delete;
/// Signals an address being waited on with a particular signaling type.
ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);
/// Waits on an address with a particular arbitration type.
ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
private:
/// Signals an address being waited on.
ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
/// Signals an address being waited on and increments its value if equal to the value argument.
ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
/// Signals an address being waited on and modifies its value based on waiting thread count if
/// equal to the value argument.
ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
s32 num_to_wake);
/// Waits on an address if the value passed is less than the argument value,
/// optionally decrementing.
ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
bool should_decrement);
/// Waits on an address if the value passed is equal to the argument value.
ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
/// Wake up num_to_wake (or all) threads in a vector.
void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
/// Insert a thread into the address arbiter container
void InsertThread(std::shared_ptr<Thread> thread);
/// Removes a thread from the address arbiter container
void RemoveThread(std::shared_ptr<Thread> thread);
// Gets the threads waiting on an address.
std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
/// List of threads waiting for an address arbiter
std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;
Core::System& system;
};
} // namespace Kernel

View File

@@ -33,9 +33,6 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
server_port->AppendPendingSession(std::move(server));
}
// Wake the threads waiting on the ServerPort
server_port->Signal();
return MakeResult(std::move(client));
}

View File

@@ -51,6 +51,8 @@ public:
*/
void ConnectionClosed();
void Finalize() override {}
private:
std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have

View File

@@ -5,14 +5,14 @@
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/session.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
namespace Kernel {
ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
ClientSession::~ClientSession() {
// This destructor will be called automatically when the last ClientSession handle is closed by
@@ -22,15 +22,6 @@ ClientSession::~ClientSession() {
}
}
bool ClientSession::ShouldWait(const Thread* thread) const {
UNIMPLEMENTED();
return {};
}
void ClientSession::Acquire(Thread* thread) {
UNIMPLEMENTED();
}
bool ClientSession::IsSignaled() const {
UNIMPLEMENTED();
return true;
@@ -47,7 +38,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
return MakeResult(std::move(client_session));
}
ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread,
ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing) {
// Keep ServerSession alive until we're done working with it.

View File

@@ -7,7 +7,7 @@
#include <memory>
#include <string>
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
union ResultCode;
@@ -24,9 +24,9 @@ namespace Kernel {
class KernelCore;
class Session;
class Thread;
class KThread;
class ClientSession final : public SynchronizationObject {
class ClientSession final : public KSynchronizationObject {
public:
explicit ClientSession(KernelCore& kernel);
~ClientSession() override;
@@ -46,15 +46,13 @@ public:
return HANDLE_TYPE;
}
ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
Core::Timing::CoreTiming& core_timing);
bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
bool IsSignaled() const override;
void Finalize() override {}
private:
static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
std::shared_ptr<Session> parent,

View File

@@ -13,12 +13,14 @@ namespace Kernel {
constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_TERMINATION_REQUESTED{ErrorModule::Kernel, 59};
constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_CURRENT_MEMORY{ErrorModule::Kernel, 106};
constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
@@ -28,6 +30,7 @@ constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
constexpr ResultCode ERR_CANCELLED{ErrorModule::Kernel, 118};
constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};

View File

@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
GlobalSchedulerContext::~GlobalSchedulerContext() = default;
void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
std::scoped_lock lock{global_list_guard};
thread_list.push_back(std::move(thread));
}
void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
std::scoped_lock lock{global_list_guard};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());

View File

@@ -12,7 +12,8 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/svc_types.h"
namespace Kernel {
@@ -20,8 +21,12 @@ class KernelCore;
class SchedulerLock;
using KSchedulerPriorityQueue =
KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
constexpr s32 HighestCoreMigrationAllowedPriority = 2;
KPriorityQueue<KThread, Core::Hardware::NUM_CPU_CORES, Svc::LowestThreadPriority,
Svc::HighestThreadPriority>;
static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
static_assert(Svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
static_assert(Svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);
class GlobalSchedulerContext final {
friend class KScheduler;
@@ -33,13 +38,13 @@ public:
~GlobalSchedulerContext();
/// Adds a new thread to the scheduler
void AddThread(std::shared_ptr<Thread> thread);
void AddThread(std::shared_ptr<KThread> thread);
/// Removes a thread from the scheduler
void RemoveThread(std::shared_ptr<Thread> thread);
void RemoveThread(std::shared_ptr<KThread> thread);
/// Returns a list of all threads managed by the scheduler
[[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
[[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
return thread_list;
}
@@ -74,7 +79,7 @@ private:
LockType scheduler_lock;
/// Lists all thread ids that aren't deleted/etc.
std::vector<std::shared_ptr<Thread>> thread_list;
std::vector<std::shared_ptr<KThread>> thread_list;
Common::SpinLock global_list_guard{};
};
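The new static_asserts pin HighestCoreMigrationAllowedPriority inside the SVC priority range. Spelled out with the conventional Horizon values (the concrete numbers here are an assumption, not quoted from the diff):

// Assumed values per the Horizon convention: numerically lower = higher priority.
constexpr int HighestThreadPriority = 0;
constexpr int LowestThreadPriority = 63;
constexpr int HighestCoreMigrationAllowedPriority = 2;

// Only threads at priority 2..63 may migrate cores; both ends of the SVC
// range must bracket the migration cutoff for the checks to hold.
static_assert(LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
static_assert(HighestThreadPriority <= HighestCoreMigrationAllowedPriority);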

View File

@@ -9,9 +9,9 @@
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
namespace Kernel {
namespace {
@@ -89,6 +89,10 @@ ResultCode HandleTable::Close(Handle handle) {
const u16 slot = GetSlot(handle);
if (objects[slot].use_count() == 1) {
objects[slot]->Finalize();
}
objects[slot] = nullptr;
generations[slot] = next_free_slot;

View File

@@ -17,16 +17,16 @@
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/server_session.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
namespace Kernel {
@@ -48,7 +48,7 @@ void SessionRequestHandler::ClientDisconnected(
HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> server_session,
std::shared_ptr<Thread> thread)
std::shared_ptr<KThread> thread)
: server_session(std::move(server_session)),
thread(std::move(thread)), kernel{kernel}, memory{memory} {
cmd_buf[0] = 0;
@@ -182,7 +182,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTabl
return RESULT_SUCCESS;
}
ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
auto& owner_process = *thread.GetOwnerProcess();
auto& handle_table = owner_process.GetHandleTable();
@@ -338,6 +338,28 @@ std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) cons
return 0;
}
bool HLERequestContext::CanReadBuffer(std::size_t buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
if (is_buffer_a) {
return BufferDescriptorA().size() > buffer_index;
} else {
return BufferDescriptorX().size() > buffer_index;
}
}
bool HLERequestContext::CanWriteBuffer(std::size_t buffer_index) const {
const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
BufferDescriptorB()[buffer_index].Size()};
if (is_buffer_b) {
return BufferDescriptorB().size() > buffer_index;
} else {
return BufferDescriptorC().size() > buffer_index;
}
}
std::string HLERequestContext::Description() const {
if (!command_header) {
return "No command header available";

View File

@@ -40,9 +40,9 @@ class HLERequestContext;
class KernelCore;
class Process;
class ServerSession;
class Thread;
class ReadableEvent;
class WritableEvent;
class KThread;
class KReadableEvent;
class KWritableEvent;
enum class ThreadWakeupReason;
@@ -110,7 +110,7 @@ class HLERequestContext {
public:
explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
std::shared_ptr<ServerSession> session,
std::shared_ptr<Thread> thread);
std::shared_ptr<KThread> thread);
~HLERequestContext();
/// Returns a pointer to the IPC command buffer for this request.
@@ -126,15 +126,12 @@ public:
return server_session;
}
using WakeupCallback = std::function<void(
std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
/// Populates this context with data from the requesting process/thread.
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
u32_le* src_cmdbuf);
/// Writes data from this context back to the requesting process/thread.
ResultCode WriteToOutgoingCommandBuffer(Thread& thread);
ResultCode WriteToOutgoingCommandBuffer(KThread& thread);
u32_le GetCommand() const {
return command;
@@ -207,6 +204,12 @@ public:
/// Helper function to get the size of the output buffer
std::size_t GetWriteBufferSize(std::size_t buffer_index = 0) const;
/// Helper function to test whether the input buffer at buffer_index can be read
bool CanReadBuffer(std::size_t buffer_index = 0) const;
/// Helper function to test whether the output buffer at buffer_index can be written
bool CanWriteBuffer(std::size_t buffer_index = 0) const;
template <typename T>
std::shared_ptr<T> GetCopyObject(std::size_t index) {
return DynamicObjectCast<T>(copy_objects.at(index));
@@ -261,11 +264,11 @@ public:
std::string Description() const;
Thread& GetThread() {
KThread& GetThread() {
return *thread;
}
const Thread& GetThread() const {
const KThread& GetThread() const {
return *thread;
}
@@ -280,7 +283,7 @@ private:
std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
std::shared_ptr<Kernel::ServerSession> server_session;
std::shared_ptr<Thread> thread;
std::shared_ptr<KThread> thread;
// TODO(yuriks): Check common usage of this and optimize size accordingly
boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;

View File

@@ -0,0 +1,365 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/time_manager.h"
#include "core/memory.h"
namespace Kernel {
KAddressArbiter::KAddressArbiter(Core::System& system_)
: system{system_}, kernel{system.Kernel()} {}
KAddressArbiter::~KAddressArbiter() = default;
namespace {
bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
*out = system.Memory().Read32(address);
return true;
}
bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
auto& monitor = system.Monitor();
const auto current_core = system.CurrentCoreIndex();
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
// TODO(bunnei): We should call CanAccessAtomic(..) here.
// Load the value from the address.
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
// Compare it to the desired one.
if (current_value < value) {
// If less than, we want to try to decrement.
const s32 decrement_value = current_value - 1;
// Decrement and try to store.
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
// If we failed to store, try again.
DecrementIfLessThan(system, out, address, value);
}
} else {
// Otherwise, clear our exclusive hold and finish
monitor.ClearExclusive();
}
// We're done.
*out = current_value;
return true;
}
bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
auto& monitor = system.Monitor();
const auto current_core = system.CurrentCoreIndex();
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
// TODO(bunnei): We should call CanAccessAtomic(..) here.
// Load the value from the address.
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
// Compare it to the desired one.
if (current_value == value) {
// If equal, we want to try to write the new value.
// Try to store.
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
// If we failed to store, try again.
UpdateIfEqual(system, out, address, value, new_value);
}
} else {
// Otherwise, clear our exclusive hold and finish.
monitor.ClearExclusive();
}
// We're done.
*out = current_value;
return true;
}
} // namespace
ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
auto it = thread_tree.nfind_light({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
KThread* target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
it = thread_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}
return RESULT_SUCCESS;
}
ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
// Check the userspace value.
s32 user_value{};
R_UNLESS(UpdateIfEqual(system, std::addressof(user_value), addr, value, value + 1),
Svc::ResultInvalidCurrentMemory);
if (user_value != value) {
return Svc::ResultInvalidState;
}
auto it = thread_tree.nfind_light({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
KThread* target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
it = thread_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}
return RESULT_SUCCESS;
}
ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
auto it = thread_tree.nfind_light({addr, -1});
// Determine the updated value.
s32 new_value{};
if (/*GetTargetFirmware() >= TargetFirmware_7_0_0*/ true) {
if (count <= 0) {
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
new_value = value - 2;
} else {
new_value = value + 1;
}
} else {
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
auto tmp_it = it;
s32 tmp_num_waiters{};
while ((++tmp_it != thread_tree.end()) &&
(tmp_it->GetAddressArbiterKey() == addr)) {
if ((tmp_num_waiters++) >= count) {
break;
}
}
if (tmp_num_waiters < count) {
new_value = value - 1;
} else {
new_value = value;
}
} else {
new_value = value + 1;
}
}
} else {
if (count <= 0) {
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
new_value = value - 1;
} else {
new_value = value + 1;
}
} else {
auto tmp_it = it;
s32 tmp_num_waiters{};
while ((tmp_it != thread_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) &&
(tmp_num_waiters < count + 1)) {
++tmp_num_waiters;
++tmp_it;
}
if (tmp_num_waiters == 0) {
new_value = value + 1;
} else if (tmp_num_waiters <= count) {
new_value = value - 1;
} else {
new_value = value;
}
}
}
// Check the userspace value.
s32 user_value{};
bool succeeded{};
if (value != new_value) {
succeeded = UpdateIfEqual(system, std::addressof(user_value), addr, value, new_value);
} else {
succeeded = ReadFromUser(system, std::addressof(user_value), addr);
}
R_UNLESS(succeeded, Svc::ResultInvalidCurrentMemory);
if (user_value != value) {
return Svc::ResultInvalidState;
}
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
KThread* target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
it = thread_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}
return RESULT_SUCCESS;
}
ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return Svc::ResultTerminationRequested;
}
// Set the synced object.
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
// Read the value from userspace.
s32 user_value{};
bool succeeded{};
if (decrement) {
succeeded = DecrementIfLessThan(system, std::addressof(user_value), addr, value);
} else {
succeeded = ReadFromUser(system, std::addressof(user_value), addr);
}
if (!succeeded) {
slp.CancelSleep();
return Svc::ResultInvalidCurrentMemory;
}
// Check that the value is less than the specified one.
if (user_value >= value) {
slp.CancelSleep();
return Svc::ResultInvalidState;
}
// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
return Svc::ResultTimedOut;
}
// Set the arbiter.
cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
thread_tree.insert(*cur_thread);
cur_thread->SetState(ThreadState::Waiting);
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
}
// Cancel the timer wait.
kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
// Remove from the address arbiter.
{
KScopedSchedulerLock sl(kernel);
if (cur_thread->IsWaitingForAddressArbiter()) {
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
cur_thread->ClearAddressArbiter();
}
}
// Get the result.
KSynchronizationObject* dummy{};
return cur_thread->GetWaitResult(std::addressof(dummy));
}
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return Svc::ResultTerminationRequested;
}
// Set the synced object.
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
// Read the value from userspace.
s32 user_value{};
if (!ReadFromUser(system, std::addressof(user_value), addr)) {
slp.CancelSleep();
return Svc::ResultInvalidCurrentMemory;
}
// Check that the value is equal.
if (value != user_value) {
slp.CancelSleep();
return Svc::ResultInvalidState;
}
// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
return Svc::ResultTimedOut;
}
// Set the arbiter.
cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
thread_tree.insert(*cur_thread);
cur_thread->SetState(ThreadState::Waiting);
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
}
// Cancel the timer wait.
kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
// Remove from the address arbiter.
{
KScopedSchedulerLock sl(kernel);
if (cur_thread->IsWaitingForAddressArbiter()) {
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
cur_thread->ClearAddressArbiter();
}
}
// Get the result.
KSynchronizationObject* dummy{};
return cur_thread->GetWaitResult(std::addressof(dummy));
}
} // namespace Kernel
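DecrementIfLessThan and UpdateIfEqual above are load-linked/store-conditional loops over the emulated exclusive monitor. The same shape expressed with std::atomic compare-exchange, as a standalone sketch of the primitive rather than the emulator's monitor API:

#include <atomic>

// Atomically decrement *word only while its value is below 'bound';
// 'observed' receives the value seen before any decrement, like the
// *out parameter in DecrementIfLessThan above.
bool DecrementIfLessThan(std::atomic<int>& word, int bound, int& observed) {
    int current = word.load();
    while (current < bound) {
        // On failure, compare_exchange_weak reloads 'current' and the
        // loop re-checks the bound, mirroring the monitor retry above.
        if (word.compare_exchange_weak(current, current - 1)) {
            observed = current;
            return true;
        }
    }
    observed = current;
    return true;
}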

View File

@@ -0,0 +1,70 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/svc_types.h"
union ResultCode;
namespace Core {
class System;
}
namespace Kernel {
class KernelCore;
class KAddressArbiter {
public:
using ThreadTree = KConditionVariable::ThreadTree;
explicit KAddressArbiter(Core::System& system_);
~KAddressArbiter();
[[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
s32 count) {
switch (type) {
case Svc::SignalType::Signal:
return Signal(addr, count);
case Svc::SignalType::SignalAndIncrementIfEqual:
return SignalAndIncrementIfEqual(addr, value, count);
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
}
UNREACHABLE();
return RESULT_UNKNOWN;
}
[[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
return WaitIfLessThan(addr, value, false, timeout);
case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
return WaitIfLessThan(addr, value, true, timeout);
case Svc::ArbitrationType::WaitIfEqual:
return WaitIfEqual(addr, value, timeout);
}
UNREACHABLE();
return RESULT_UNKNOWN;
}
private:
[[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
[[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
[[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
[[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
[[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
ThreadTree thread_tree;
Core::System& system;
KernelCore& kernel;
};
} // namespace Kernel

View File

@@ -27,7 +27,7 @@ public:
}
[[nodiscard]] constexpr bool GetAffinity(s32 core) const {
return this->mask & GetCoreBit(core);
return (this->mask & GetCoreBit(core)) != 0;
}
constexpr void SetAffinity(s32 core, bool set) {
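The GetAffinity change avoids returning a u64 bit-test directly from a bool function, which implicitly truncates the integer and trips compiler warnings. A minimal illustration of the fixed form:

#include <cstdint>

// Comparing against zero makes the bool conversion explicit; returning
// 'mask & bit' from a bool function narrows u64 -> bool implicitly.
constexpr bool HasCoreBit(std::uint64_t mask, int core) {
    return (mask & (std::uint64_t{1} << core)) != 0;
}
static_assert(HasCoreBit(0b1000, 3));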

View File

@@ -0,0 +1,345 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <vector>
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"
namespace Kernel {
namespace {
bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
*out = system.Memory().Read32(address);
return true;
}
bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
system.Memory().Write32(address, *p);
return true;
}
bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
u32 new_orr_mask) {
auto& monitor = system.Monitor();
const auto current_core = system.CurrentCoreIndex();
// Load the value from the address.
const auto expected = monitor.ExclusiveRead32(current_core, address);
// Orr in the new mask.
u32 value = expected | new_orr_mask;
// If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
if (!expected) {
value = if_zero;
}
// Try to store.
if (!monitor.ExclusiveWrite32(current_core, address, value)) {
// If we failed to store, try again.
return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
}
// We're done.
*out = expected;
return true;
}
} // namespace
KConditionVariable::KConditionVariable(Core::System& system_)
: system{system_}, kernel{system.Kernel()} {}
KConditionVariable::~KConditionVariable() = default;
ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
// Signal the address.
{
KScopedSchedulerLock sl(kernel);
// Remove waiter thread.
s32 num_waiters{};
KThread* next_owner_thread =
owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
// Determine the next tag.
u32 next_value{};
if (next_owner_thread) {
next_value = next_owner_thread->GetAddressKeyValue();
if (num_waiters > 1) {
next_value |= Svc::HandleWaitMask;
}
next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
next_owner_thread->Wakeup();
}
// Write the value to userspace.
if (!WriteToUser(system, addr, std::addressof(next_value))) {
if (next_owner_thread) {
next_owner_thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
}
return Svc::ResultInvalidCurrentMemory;
}
}
return RESULT_SUCCESS;
}
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
// Wait for the address.
{
std::shared_ptr<KThread> owner_thread;
ASSERT(!owner_thread);
{
KScopedSchedulerLock sl(kernel);
cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested);
{
// Read the tag from userspace.
u32 test_tag{};
R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
Svc::ResultInvalidCurrentMemory);
// If the tag isn't the handle (with wait mask), we're done.
R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
// Get the lock owner thread.
owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
R_UNLESS(owner_thread, Svc::ResultInvalidHandle);
// Update the lock.
cur_thread->SetAddressKey(addr, value);
owner_thread->AddWaiter(cur_thread);
cur_thread->SetState(ThreadState::Waiting);
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
cur_thread->SetMutexWaitAddressForDebugging(addr);
}
}
ASSERT(owner_thread);
}
// Remove the thread as a waiter from the lock owner.
{
KScopedSchedulerLock sl(kernel);
KThread* owner_thread = cur_thread->GetLockOwner();
if (owner_thread != nullptr) {
owner_thread->RemoveWaiter(cur_thread);
}
}
// Get the wait result.
KSynchronizationObject* dummy{};
return cur_thread->GetWaitResult(std::addressof(dummy));
}
KThread* KConditionVariable::SignalImpl(KThread* thread) {
// Check pre-conditions.
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Update the tag.
VAddr address = thread->GetAddressKey();
u32 own_tag = thread->GetAddressKeyValue();
u32 prev_tag{};
bool can_access{};
{
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
// TODO(bunnei): We should call CanAccessAtomic(..) here.
can_access = true;
if (can_access) {
UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
Svc::HandleWaitMask);
}
}
KThread* thread_to_close = nullptr;
if (can_access) {
if (prev_tag == InvalidHandle) {
// If nobody held the lock previously, we're all good.
thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
thread->Wakeup();
} else {
// Get the previous owner.
auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(
prev_tag & ~Svc::HandleWaitMask);
if (owner_thread) {
// Add the thread as a waiter on the owner.
owner_thread->AddWaiter(thread);
thread_to_close = owner_thread.get();
} else {
// The lock was tagged with a thread that doesn't exist.
thread->SetSyncedObject(nullptr, Svc::ResultInvalidState);
thread->Wakeup();
}
}
} else {
// If the address wasn't accessible, note so.
thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
thread->Wakeup();
}
return thread_to_close;
}
void KConditionVariable::Signal(u64 cv_key, s32 count) {
// Prepare for signaling.
constexpr int MaxThreads = 16;
// TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
// std::shared_ptr.
std::vector<std::shared_ptr<KThread>> thread_list;
std::array<KThread*, MaxThreads> thread_array;
s32 num_to_close{};
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
auto it = thread_tree.nfind_light({cv_key, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetConditionVariableKey() == cv_key)) {
KThread* target_thread = std::addressof(*it);
if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
if (num_to_close < MaxThreads) {
thread_array[num_to_close++] = thread;
} else {
thread_list.push_back(SharedFrom(thread));
}
}
it = thread_tree.erase(it);
target_thread->ClearConditionVariable();
++num_waiters;
}
// If there are no remaining waiters, clear the has-waiter flag.
if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
const u32 has_waiter_flag{};
WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
}
}
// Close threads in the array.
for (auto i = 0; i < num_to_close; ++i) {
thread_array[i]->Close();
}
// Close threads in the list.
for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
(*it)->Close();
}
}
ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
// Set the synced object.
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return Svc::ResultTerminationRequested;
}
// Update the value and process for the next owner.
{
// Remove waiter thread.
s32 num_waiters{};
KThread* next_owner_thread =
cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
// Update for the next owner thread.
u32 next_value{};
if (next_owner_thread != nullptr) {
// Get the next tag value.
next_value = next_owner_thread->GetAddressKeyValue();
if (num_waiters > 1) {
next_value |= Svc::HandleWaitMask;
}
// Wake up the next owner.
next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
next_owner_thread->Wakeup();
}
// Write to the cv key.
{
const u32 has_waiter_flag = 1;
WriteToUser(system, key, std::addressof(has_waiter_flag));
// TODO(bunnei): We should call DataMemoryBarrier(..) here.
}
// Write the value to userspace.
if (!WriteToUser(system, addr, std::addressof(next_value))) {
slp.CancelSleep();
return Svc::ResultInvalidCurrentMemory;
}
}
// Update condition variable tracking.
{
cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
thread_tree.insert(*cur_thread);
}
// If the timeout is non-zero, set the thread as waiting.
if (timeout != 0) {
cur_thread->SetState(ThreadState::Waiting);
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
cur_thread->SetMutexWaitAddressForDebugging(addr);
}
}
// Cancel the timer wait.
kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
// Remove from the condition variable.
{
KScopedSchedulerLock sl(kernel);
if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(cur_thread);
}
if (cur_thread->IsWaitingForConditionVariable()) {
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
cur_thread->ClearConditionVariable();
}
}
// Get the result.
KSynchronizationObject* dummy{};
return cur_thread->GetWaitResult(std::addressof(dummy));
}
} // namespace Kernel
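
A note on the protocol being arbitrated: the 32-bit tag word in guest memory is zero when the lock is free; otherwise it holds the owner's handle, with Svc::HandleWaitMask set while further waiters queue behind the owner. A standalone sketch of the tag transitions computed by SignalToAddress above (standard C++; the mask value is an assumption here, taken as 1u << 30):

#include <cassert>
#include <cstdint>

constexpr std::uint32_t HandleWaitMask = 1u << 30; // assumed value of Svc::HandleWaitMask

// The value SignalToAddress writes back to userspace for a given next owner.
std::uint32_t NextTag(std::uint32_t next_owner_handle, int num_waiters) {
    std::uint32_t next_value = next_owner_handle; // zero when nobody is waiting
    if (num_waiters > 1) {
        next_value |= HandleWaitMask; // waiters remain behind the new owner
    }
    return next_value;
}

int main() {
    assert(NextTag(0, 0) == 0);                              // lock becomes free
    assert(NextTag(0x8001, 1) == 0x8001);                    // last waiter: no wait bit
    assert(NextTag(0x8001, 2) == (0x8001 | HandleWaitMask)); // contention persists
}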

View File

@@ -0,0 +1,59 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"
namespace Core {
class System;
}
namespace Kernel {
class KConditionVariable {
public:
using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
explicit KConditionVariable(Core::System& system_);
~KConditionVariable();
// Arbitration
[[nodiscard]] ResultCode SignalToAddress(VAddr addr);
[[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
// Condition variable
void Signal(u64 cv_key, s32 count);
[[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
private:
[[nodiscard]] KThread* SignalImpl(KThread* thread);
ThreadTree thread_tree;
Core::System& system;
KernelCore& kernel;
};
inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
tree->erase(tree->iterator_to(*thread));
}
inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
tree->insert(*thread);
}
} // namespace Kernel
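
The Before/AfterUpdatePriority hooks exist because thread_tree orders waiters by priority: a thread whose priority changes must leave the tree before its sort key mutates and re-enter afterwards. A hedged sketch of the intended call pattern (the caller and the SetPriority mutator shown here are hypothetical):

void OnCondVarWaiterPriorityChanged(const KernelCore& kernel,
                                    KConditionVariable::ThreadTree* tree, KThread* thread,
                                    s32 new_priority) {
    BeforeUpdatePriority(kernel, tree, thread); // erase while the old key is still valid
    thread->SetPriority(new_priority);          // assumed mutator; changes the sort key
    AfterUpdatePriority(kernel, tree, thread);  // re-insert at the new position
}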

View File

@@ -0,0 +1,32 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_writable_event.h"
namespace Kernel {
KEvent::KEvent(KernelCore& kernel, std::string&& name) : Object{kernel, std::move(name)} {}
KEvent::~KEvent() = default;
std::shared_ptr<KEvent> KEvent::Create(KernelCore& kernel, std::string&& name) {
return std::make_shared<KEvent>(kernel, std::move(name));
}
void KEvent::Initialize() {
// Create our sub events.
readable_event = std::make_shared<KReadableEvent>(kernel, GetName() + ":Readable");
writable_event = std::make_shared<KWritableEvent>(kernel, GetName() + ":Writable");
// Initialize our sub sessions.
readable_event->Initialize(this);
writable_event->Initialize(this);
// Mark initialized.
initialized = true;
}
} // namespace Kernel
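
A brief usage sketch for the two-phase construction above (the name is illustrative and a live KernelCore reference is assumed; that KWritableEvent::Signal forwards to the readable half is an assumption consistent with the sibling files in this change):

auto event = KEvent::Create(kernel, "ISelfController:LaunchableEvent");
event->Initialize(); // wires up the ":Readable" and ":Writable" sub-events

event->GetWritableEvent()->Signal(); // assumed to call through to KReadableEvent::Signal
event->GetReadableEvent()->Clear();  // unconditionally resets is_signaled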

View File

@@ -0,0 +1,57 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/kernel/object.h"
namespace Kernel {
class KernelCore;
class KReadableEvent;
class KWritableEvent;
class KEvent final : public Object {
public:
explicit KEvent(KernelCore& kernel, std::string&& name);
~KEvent() override;
static std::shared_ptr<KEvent> Create(KernelCore& kernel, std::string&& name);
void Initialize();
void Finalize() override {}
std::string GetTypeName() const override {
return "KEvent";
}
static constexpr HandleType HANDLE_TYPE = HandleType::Event;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
std::shared_ptr<KReadableEvent>& GetReadableEvent() {
return readable_event;
}
std::shared_ptr<KWritableEvent>& GetWritableEvent() {
return writable_event;
}
const std::shared_ptr<KReadableEvent>& GetReadableEvent() const {
return readable_event;
}
const std::shared_ptr<KWritableEvent>& GetWritableEvent() const {
return writable_event;
}
private:
std::shared_ptr<KReadableEvent> readable_event;
std::shared_ptr<KWritableEvent> writable_event;
bool initialized{};
};
} // namespace Kernel

View File

@@ -0,0 +1,57 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#pragma once
#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
class KernelCore;
class KLightConditionVariable {
public:
explicit KLightConditionVariable(KernelCore& kernel) : thread_queue(kernel), kernel(kernel) {}
void Wait(KLightLock* lock, s64 timeout = -1) {
WaitImpl(lock, timeout);
lock->Lock();
}
void Broadcast() {
KScopedSchedulerLock lk{kernel};
while (thread_queue.WakeupFrontThread() != nullptr) {
// We want to signal all threads, and so should continue waking up until there's nothing
// to wake.
}
}
private:
void WaitImpl(KLightLock* lock, s64 timeout) {
KThread* owner = GetCurrentThreadPointer(kernel);
// Sleep the thread.
{
KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
lock->Unlock();
if (!thread_queue.SleepThread(owner)) {
lk.CancelSleep();
return;
}
}
// Cancel the timer task that the sleep set up.
kernel.TimeManager().UnscheduleTimeEvent(owner);
}
KThreadQueue thread_queue;
KernelCore& kernel;
};
} // namespace Kernel
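
The canonical consumer/producer pattern for this class looks like the following hedged sketch (the predicate and surrounding members are hypothetical; KScopedLightLock is the RAII alias declared in k_light_lock.h below). Wait() drops the lock while sleeping and re-acquires it before returning, so the predicate must be rechecked in a loop:

KLightLock lock{kernel};
KLightConditionVariable cond_var{kernel};
bool ready{};

void Consume() {
    KScopedLightLock lk{lock};
    while (!ready) {
        cond_var.Wait(std::addressof(lock), -1); // unlock, sleep, then re-lock
    }
    // ... consume while still holding the lock ...
}

void Produce() {
    {
        KScopedLightLock lk{lock};
        ready = true;
    }
    cond_var.Broadcast(); // wake every thread parked on the queue
}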

View File

@@ -0,0 +1,130 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
void KLightLock::Lock() {
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
const uintptr_t cur_thread_tag = (cur_thread | 1);
while (true) {
uintptr_t old_tag = tag.load(std::memory_order_relaxed);
while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
std::memory_order_acquire)) {
if ((old_tag | 1) == cur_thread_tag) {
return;
}
}
if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
break;
}
LockSlowPath(old_tag | 1, cur_thread);
}
}
void KLightLock::Unlock() {
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
uintptr_t expected = cur_thread;
do {
if (expected != cur_thread) {
return UnlockSlowPath(cur_thread);
}
} while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
}
void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
// Pend the current thread waiting on the owner thread.
{
KScopedSchedulerLock sl{kernel};
// Ensure we actually have locking to do.
if (tag.load(std::memory_order_relaxed) != _owner) {
return;
}
// Add the current thread as a waiter on the owner.
KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
owner_thread->AddWaiter(cur_thread);
// Set thread states.
if (cur_thread->GetState() == ThreadState::Runnable) {
cur_thread->SetState(ThreadState::Waiting);
} else {
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
if (owner_thread->IsSuspended()) {
owner_thread->ContinueIfHasKernelWaiters();
}
}
// We're no longer waiting on the lock owner.
{
KScopedSchedulerLock sl{kernel};
KThread* owner_thread = cur_thread->GetLockOwner();
if (owner_thread) {
owner_thread->RemoveWaiter(cur_thread);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
}
}
void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
KThread* owner_thread = reinterpret_cast<KThread*>(_cur_thread);
// Unlock.
{
KScopedSchedulerLock sl{kernel};
// Get the next owner.
s32 num_waiters = 0;
KThread* next_owner = owner_thread->RemoveWaiterByKey(
std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
// Pass the lock to the next owner.
uintptr_t next_tag = 0;
if (next_owner) {
next_tag = reinterpret_cast<uintptr_t>(next_owner);
if (num_waiters > 1) {
next_tag |= 0x1;
}
if (next_owner->GetState() == ThreadState::Waiting) {
next_owner->SetState(ThreadState::Runnable);
} else {
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
if (next_owner->IsSuspended()) {
next_owner->ContinueIfHasKernelWaiters();
}
}
// We may have unsuspended in the process of acquiring the lock, so re-suspend now if so.
if (owner_thread->IsSuspended()) {
owner_thread->TrySuspend();
}
// Write the new tag value.
tag.store(next_tag);
}
}
bool KLightLock::IsLockedByCurrentThread() const {
return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
}
} // namespace Kernel

View File

@@ -0,0 +1,41 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <atomic>
#include "common/common_types.h"
#include "core/hle/kernel/k_scoped_lock.h"
namespace Kernel {
class KernelCore;
class KLightLock {
public:
explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}
void Lock();
void Unlock();
void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
void UnlockSlowPath(uintptr_t cur_thread);
bool IsLocked() const {
return tag != 0;
}
bool IsLockedByCurrentThread() const;
private:
std::atomic<uintptr_t> tag{};
KernelCore& kernel;
};
using KScopedLightLock = KScopedLock<KLightLock>;
} // namespace Kernel
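
The tag layout is the whole trick here: the low bit of the owner KThread* doubles as a "has waiters" flag, which is why the fast paths mask with | 1 and the slow paths strip with & ~1ULL. A standalone illustration in standard C++:

#include <cassert>
#include <cstdint>

int main() {
    alignas(2) static int fake_thread; // stands in for a KThread; alignment frees the low bit
    const auto owner = reinterpret_cast<std::uintptr_t>(&fake_thread);

    std::uintptr_t tag = 0; // 0 => unlocked
    tag = owner;            // locked, uncontended
    assert((tag & 1) == 0);

    tag |= 1;                         // a waiter arrived: contention bit set
    assert((tag & ~1ULL) == owner);   // the owner pointer is still recoverable
    assert((tag | 1) == (owner | 1)); // the identity test IsLockedByCurrentThread uses
}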

View File

@@ -8,27 +8,27 @@
#pragma once
#include <array>
#include <bit>
#include <concepts>
#include "common/assert.h"
#include "common/bit_set.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/concepts.h"
namespace Kernel {
class Thread;
class KThread;
template <typename T>
concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
{ t.GetAffinityMask() }
->Common::ConvertibleTo<u64>;
{t.SetAffinityMask(std::declval<u64>())};
{t.SetAffinityMask(0)};
{ t.GetAffinity(std::declval<int32_t>()) }
{ t.GetAffinity(0) }
->std::same_as<bool>;
{t.SetAffinity(std::declval<int32_t>(), std::declval<bool>())};
{t.SetAffinity(0, false)};
{t.SetAll()};
};
@@ -42,11 +42,11 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
->std::same_as<T*>;
{ (typename T::QueueEntry()).GetPrev() }
->std::same_as<T*>;
{ t.GetPriorityQueueEntry(std::declval<s32>()) }
{ t.GetPriorityQueueEntry(0) }
->std::same_as<typename T::QueueEntry&>;
{t.GetAffinityMask()};
{ typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() }
{ std::remove_cvref_t<decltype(t.GetAffinityMask())>() }
->KPriorityQueueAffinityMask;
{ t.GetActiveCore() }
@@ -55,17 +55,17 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
->Common::ConvertibleTo<s32>;
};
template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
requires KPriorityQueueMember<Member> class KPriorityQueue {
public:
using AffinityMaskType = typename std::remove_cv_t<
typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>;
using AffinityMaskType = std::remove_cv_t<
std::remove_reference_t<decltype(std::declval<Member>().GetAffinityMask())>>;
static_assert(LowestPriority >= 0);
static_assert(HighestPriority >= 0);
static_assert(LowestPriority >= HighestPriority);
static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
static constexpr size_t NumCores = _NumCores;
static constexpr size_t NumCores = NumCores_;
static constexpr bool IsValidCore(s32 core) {
return 0 <= core && core < static_cast<s32>(NumCores);
@@ -268,7 +268,7 @@ private:
}
constexpr s32 GetNextCore(u64& affinity) {
const s32 core = Common::CountTrailingZeroes64(affinity);
const s32 core = std::countr_zero(affinity);
ClearAffinityBit(affinity, core);
return core;
}
@@ -367,7 +367,7 @@ public:
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
}
constexpr Thread* MoveToScheduledBack(Member* member) {
constexpr KThread* MoveToScheduledBack(Member* member) {
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
member);
}
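
The recurring replacement in this diff swaps Common::CountTrailingZeroes64 for C++20's std::countr_zero when walking affinity masks. A standalone sketch of the idiom (standard C++20):

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    std::uint64_t affinity = 0b1011; // cores 0, 1 and 3 allowed
    while (affinity != 0) {
        const int core = std::countr_zero(affinity); // index of the lowest set bit
        affinity &= ~(std::uint64_t{1} << core);     // the ClearAffinityBit equivalent
        std::printf("schedulable on core %d\n", core);
    }
}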

View File

@@ -0,0 +1,57 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/logging/log.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
KReadableEvent::KReadableEvent(KernelCore& kernel, std::string&& name)
: KSynchronizationObject{kernel, std::move(name)} {}
KReadableEvent::~KReadableEvent() = default;
bool KReadableEvent::IsSignaled() const {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
return is_signaled;
}
ResultCode KReadableEvent::Signal() {
KScopedSchedulerLock lk{kernel};
if (!is_signaled) {
is_signaled = true;
NotifyAvailable();
}
return RESULT_SUCCESS;
}
ResultCode KReadableEvent::Clear() {
Reset();
return RESULT_SUCCESS;
}
ResultCode KReadableEvent::Reset() {
KScopedSchedulerLock lk{kernel};
if (!is_signaled) {
return Svc::ResultInvalidState;
}
is_signaled = false;
return RESULT_SUCCESS;
}
} // namespace Kernel
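
The three mutators above differ only in strictness: Signal is an idempotent edge-trigger, Clear ignores the current state, and Reset fails when the event is not signaled. A hedged sketch making that concrete (hypothetical call site):

ResultCode DemonstrateReset(KReadableEvent& event) {
    event.Signal();                    // edge: sets is_signaled and notifies waiters
    ASSERT(event.Reset().IsSuccess()); // strict reset succeeds while signaled
    return event.Reset();              // already cleared: returns Svc::ResultInvalidState
}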

View File

@@ -0,0 +1,51 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class KernelCore;
class KEvent;
class KReadableEvent final : public KSynchronizationObject {
public:
explicit KReadableEvent(KernelCore& kernel, std::string&& name);
~KReadableEvent() override;
std::string GetTypeName() const override {
return "KReadableEvent";
}
static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
KEvent* GetParent() const {
return parent;
}
void Initialize(KEvent* parent_) {
is_signaled = false;
parent = parent_;
}
bool IsSignaled() const override;
void Finalize() override {}
ResultCode Signal();
ResultCode Clear();
ResultCode Reset();
private:
bool is_signaled{};
KEvent* parent{};
};
} // namespace Kernel

View File

@@ -0,0 +1,152 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#include "common/assert.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
KResourceLimit::KResourceLimit(KernelCore& kernel, Core::System& system)
: Object{kernel}, lock{kernel}, cond_var{kernel}, kernel{kernel}, system(system) {}
KResourceLimit::~KResourceLimit() = default;
s64 KResourceLimit::GetLimitValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
{
KScopedLightLock lk{lock};
value = limit_values[index];
ASSERT(value >= 0);
ASSERT(current_values[index] <= limit_values[index]);
ASSERT(current_hints[index] <= current_values[index]);
}
return value;
}
s64 KResourceLimit::GetCurrentValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
{
KScopedLightLock lk{lock};
value = current_values[index];
ASSERT(value >= 0);
ASSERT(current_values[index] <= limit_values[index]);
ASSERT(current_hints[index] <= current_values[index]);
}
return value;
}
s64 KResourceLimit::GetPeakValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
{
KScopedLightLock lk{lock};
value = peak_values[index];
ASSERT(value >= 0);
ASSERT(current_values[index] <= limit_values[index]);
ASSERT(current_hints[index] <= current_values[index]);
}
return value;
}
s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
const auto index = static_cast<std::size_t>(which);
s64 value{};
{
KScopedLightLock lk(lock);
ASSERT(current_values[index] >= 0);
ASSERT(current_values[index] <= limit_values[index]);
ASSERT(current_hints[index] <= current_values[index]);
value = limit_values[index] - current_values[index];
}
return value;
}
ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
const auto index = static_cast<std::size_t>(which);
KScopedLightLock lk(lock);
R_UNLESS(current_values[index] <= value, Svc::ResultInvalidState);
limit_values[index] = value;
return RESULT_SUCCESS;
}
bool KResourceLimit::Reserve(LimitableResource which, s64 value) {
return Reserve(which, value, system.CoreTiming().GetGlobalTimeNs().count() + DefaultTimeout);
}
bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
ASSERT(value >= 0);
const auto index = static_cast<std::size_t>(which);
KScopedLightLock lk(lock);
ASSERT(current_hints[index] <= current_values[index]);
if (current_hints[index] >= limit_values[index]) {
return false;
}
// Loop until we reserve or run out of time.
while (true) {
ASSERT(current_values[index] <= limit_values[index]);
ASSERT(current_hints[index] <= current_values[index]);
// If adding the value would overflow, don't allow the reservation to succeed.
if (current_values[index] + value <= current_values[index]) {
break;
}
if (current_values[index] + value <= limit_values[index]) {
current_values[index] += value;
current_hints[index] += value;
peak_values[index] = std::max(peak_values[index], current_values[index]);
return true;
}
if (current_hints[index] + value <= limit_values[index] &&
(timeout < 0 || system.CoreTiming().GetGlobalTimeNs().count() < timeout)) {
waiter_count++;
cond_var.Wait(&lock, timeout);
waiter_count--;
} else {
break;
}
}
return false;
}
void KResourceLimit::Release(LimitableResource which, s64 value) {
Release(which, value, value);
}
void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
ASSERT(value >= 0);
ASSERT(hint >= 0);
const auto index = static_cast<std::size_t>(which);
KScopedLightLock lk(lock);
ASSERT(current_values[index] <= limit_values[index]);
ASSERT(current_hints[index] <= current_values[index]);
ASSERT(value <= current_values[index]);
ASSERT(hint <= current_hints[index]);
current_values[index] -= value;
current_hints[index] -= hint;
if (waiter_count != 0) {
cond_var.Broadcast();
}
}
} // namespace Kernel
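
A usage sketch for the reserve/release protocol above (hypothetical call site). The two-argument Reserve blocks for up to DefaultTimeout when the hint says the limit is exhausted, waiting for a Release() broadcast:

bool WithThreadBudget(KResourceLimit& limit) {
    if (!limit.Reserve(LimitableResource::Threads, 1)) {
        return false; // over the limit, or timed out waiting for a Release broadcast
    }
    // ... create the thread; on failure paths, hand the budget back ...
    limit.Release(LimitableResource::Threads, 1);
    return true;
}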

View File

@@ -0,0 +1,81 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#pragma once
#include <array>
#include "common/common_types.h"
#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/object.h"
union ResultCode;
namespace Core {
class System;
}
namespace Kernel {
class KernelCore;
enum class LimitableResource : u32 {
PhysicalMemory = 0,
Threads = 1,
Events = 2,
TransferMemory = 3,
Sessions = 4,
Count,
};
constexpr bool IsValidResourceType(LimitableResource type) {
return type < LimitableResource::Count;
}
class KResourceLimit final : public Object {
public:
explicit KResourceLimit(KernelCore& kernel, Core::System& system);
~KResourceLimit();
s64 GetLimitValue(LimitableResource which) const;
s64 GetCurrentValue(LimitableResource which) const;
s64 GetPeakValue(LimitableResource which) const;
s64 GetFreeValue(LimitableResource which) const;
ResultCode SetLimitValue(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value, s64 timeout);
void Release(LimitableResource which, s64 value);
void Release(LimitableResource which, s64 value, s64 hint);
std::string GetTypeName() const override {
return "KResourceLimit";
}
std::string GetName() const override {
return GetTypeName();
}
static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
virtual void Finalize() override {}
private:
using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>;
ResourceArray limit_values{};
ResourceArray current_values{};
ResourceArray current_hints{};
ResourceArray peak_values{};
mutable KLightLock lock;
s32 waiter_count{};
KLightConditionVariable cond_var;
KernelCore& kernel;
Core::System& system;
};
} // namespace Kernel

View File

@@ -5,6 +5,8 @@
// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#include <bit>
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/fiber.h"
@@ -15,28 +17,33 @@
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
static void IncrementScheduledCount(Kernel::Thread* thread) {
static void IncrementScheduledCount(Kernel::KThread* thread) {
if (auto process = thread->GetOwnerProcess(); process) {
process->IncrementScheduledCount();
}
}
void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
Core::EmuThreadHandle global_thread) {
u32 current_core = global_thread.host_handle;
bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
(current_core < Core::Hardware::NUM_CPU_CORES);
void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
auto scheduler = kernel.CurrentScheduler();
u32 current_core{0xF};
bool must_context_switch{};
if (scheduler) {
current_core = scheduler->core_id;
// TODO(bunnei): Should be set to true when we deprecate single core
must_context_switch = !kernel.IsPhantomModeForSingleCore();
}
while (cores_pending_reschedule != 0) {
u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
ASSERT(core < Core::Hardware::NUM_CPU_CORES);
if (!must_context_switch || core != current_core) {
auto& phys_core = kernel.PhysicalCore(core);
@@ -54,28 +61,27 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
}
}
u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
std::scoped_lock lock{guard};
if (Thread* prev_highest_thread = this->state.highest_priority_thread;
if (KThread* prev_highest_thread = state.highest_priority_thread;
prev_highest_thread != highest_thread) {
if (prev_highest_thread != nullptr) {
IncrementScheduledCount(prev_highest_thread);
prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
}
if (this->state.should_count_idle) {
if (state.should_count_idle) {
if (highest_thread != nullptr) {
// if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
// process->SetRunningThread(this->core_id, highest_thread,
// this->state.idle_count);
//}
if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
process->SetRunningThread(core_id, highest_thread, state.idle_count);
}
} else {
this->state.idle_count++;
state.idle_count++;
}
}
this->state.highest_priority_thread = highest_thread;
this->state.needs_scheduling = true;
return (1ULL << this->core_id);
state.highest_priority_thread = highest_thread;
state.needs_scheduling.store(true);
return (1ULL << core_id);
} else {
return 0;
}
@@ -88,16 +94,29 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
ClearSchedulerUpdateNeeded(kernel);
u64 cores_needing_scheduling = 0, idle_cores = 0;
Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
auto& priority_queue = GetPriorityQueue(kernel);
/// We want to go over all cores, finding the highest priority thread and determining if
/// scheduling is needed for that core.
for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
if (top_thread != nullptr) {
// If the thread has no waiters, we need to check if the process has a thread pinned.
// TODO(bunnei): Implement thread pinning
if (top_thread->GetNumKernelWaiters() == 0) {
if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
pinned != nullptr && pinned != top_thread) {
// We prefer our parent's pinned thread if possible. However, we also don't
// want to schedule un-runnable threads.
if (pinned->GetRawState() == ThreadState::Runnable) {
top_thread = pinned;
} else {
top_thread = nullptr;
}
}
}
}
} else {
idle_cores |= (1ULL << core_id);
}
@@ -109,8 +128,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
while (idle_cores != 0) {
u32 core_id = Common::CountTrailingZeroes64(idle_cores);
if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
size_t num_candidates = 0;
@@ -118,7 +137,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* top_thread =
if (KThread* top_thread =
(suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
top_thread != suggested) {
// Make sure we're not dealing with threads too high priority for migration.
@@ -150,7 +169,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Check if there's some other thread that can run on the candidate core.
const s32 candidate_core = migration_candidates[i];
suggested = top_threads[candidate_core];
if (Thread* next_on_candidate_core =
if (KThread* next_on_candidate_core =
priority_queue.GetScheduledNext(candidate_core, suggested);
next_on_candidate_core != nullptr) {
// The candidate core can run some other thread! We'll migrate its current
@@ -180,22 +199,35 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
return cores_needing_scheduling;
}
void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
// Get an atomic reference to the core scheduler's previous thread.
std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
// Atomically clear the previous thread if it's our target.
KThread* compare = thread;
prev_thread.compare_exchange_strong(compare, nullptr);
}
}
void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Check if the state has changed, because if it hasn't there's nothing to do.
const auto cur_state = thread->scheduling_state;
const auto cur_state = thread->GetRawState();
if (cur_state == old_state) {
return;
}
// Update the priority queues.
if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
if (old_state == ThreadState::Runnable) {
// If we were previously runnable, then we're not runnable now, and we should remove.
GetPriorityQueue(kernel).Remove(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
} else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
} else if (cur_state == ThreadState::Runnable) {
// If we're now runnable, then we weren't previously, and we should add.
GetPriorityQueue(kernel).PushBack(thread);
IncrementScheduledCount(thread);
@@ -203,13 +235,11 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 ol
}
}
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
u32 old_priority) {
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// If the thread is runnable, we want to change its priority in the queue.
if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
if (thread->GetRawState() == ThreadState::Runnable) {
GetPriorityQueue(kernel).ChangePriority(
old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
IncrementScheduledCount(thread);
@@ -217,12 +247,12 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thr
}
}
void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// If the thread is runnable, we want to change its affinity in the queue.
if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
if (thread->GetRawState() == ThreadState::Runnable) {
GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
@@ -237,8 +267,8 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
auto& priority_queue = GetPriorityQueue(kernel);
// Rotate the front of the queue to the end.
Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
Thread* next_thread = nullptr;
KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
KThread* next_thread = nullptr;
if (top_thread != nullptr) {
next_thread = priority_queue.MoveToScheduledBack(top_thread);
if (next_thread != top_thread) {
@@ -249,11 +279,11 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
// While we have a suggested thread, try to migrate it!
{
Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* top_on_suggested_core =
if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -285,15 +315,15 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
// Now that we might have migrated a thread with the same priority, check if we can do better.
{
Thread* best_thread = priority_queue.GetScheduledFront(core_id);
KThread* best_thread = priority_queue.GetScheduledFront(core_id);
if (best_thread == GetCurrentThread()) {
best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
}
// If the best thread we can choose has a priority the same or worse than ours, try to
// migrate a higher priority thread.
if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// If the suggestion's priority is the same as ours, don't bother.
if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -302,7 +332,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* top_on_suggested_core =
if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -352,12 +382,14 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
}
}
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
Core::EmuThreadHandle global_thread) {
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
scheduler->GetCurrentThread()->EnableDispatch();
ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
scheduler->GetCurrentThread()->EnableDispatch();
}
}
RescheduleCores(kernel, cores_needing_scheduling, global_thread);
RescheduleCores(kernel, cores_needing_scheduling);
}
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -372,15 +404,13 @@ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
return kernel.GlobalSchedulerContext().priority_queue;
}
void KScheduler::YieldWithoutCoreMigration() {
auto& kernel = system.Kernel();
void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
Thread& cur_thread = *GetCurrentThread();
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -395,10 +425,10 @@ void KScheduler::YieldWithoutCoreMigration() {
{
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.scheduling_state;
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
// Put the current thread at the back of the queue.
Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
// If the next thread is different, we have an update to perform.
@@ -413,15 +443,13 @@ void KScheduler::YieldWithoutCoreMigration() {
}
}
void KScheduler::YieldWithCoreMigration() {
auto& kernel = system.Kernel();
void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
Thread& cur_thread = *GetCurrentThread();
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -436,23 +464,23 @@ void KScheduler::YieldWithCoreMigration() {
{
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.scheduling_state;
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
// Get the current active core.
const s32 core_id = cur_thread.GetActiveCore();
// Put the current thread at the back of the queue.
Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
// While we have a suggested thread, try to migrate it!
bool recheck = false;
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// Check if the suggested thread is the thread running on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* running_on_suggested_core =
if (KThread* running_on_suggested_core =
(suggested_core >= 0)
? kernel.Scheduler(suggested_core).state.highest_priority_thread
: nullptr;
@@ -503,15 +531,13 @@ void KScheduler::YieldWithCoreMigration() {
}
}
void KScheduler::YieldToAnyThread() {
auto& kernel = system.Kernel();
void KScheduler::YieldToAnyThread(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
Thread& cur_thread = *GetCurrentThread();
KThread& cur_thread = Kernel::GetCurrentThread(kernel);
Process& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -526,8 +552,8 @@ void KScheduler::YieldToAnyThread() {
{
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.scheduling_state;
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
// Get the current active core.
const s32 core_id = cur_thread.GetActiveCore();
@@ -539,11 +565,11 @@ void KScheduler::YieldToAnyThread() {
// If there's nothing scheduled, we can try to perform a migration.
if (priority_queue.GetScheduledFront(core_id) == nullptr) {
// While we have a suggested thread, try to migrate it!
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
if (Thread* top_on_suggested_core =
if (KThread* top_on_suggested_core =
(suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
: nullptr;
top_on_suggested_core != suggested) {
@@ -581,22 +607,21 @@ void KScheduler::YieldToAnyThread() {
}
}
KScheduler::KScheduler(Core::System& system, std::size_t core_id)
: system(system), core_id(core_id) {
KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
this->state.needs_scheduling = true;
this->state.interrupt_task_thread_runnable = false;
this->state.should_count_idle = false;
this->state.idle_count = 0;
this->state.idle_thread_stack = nullptr;
this->state.highest_priority_thread = nullptr;
state.needs_scheduling.store(true);
state.interrupt_task_thread_runnable = false;
state.should_count_idle = false;
state.idle_count = 0;
state.idle_thread_stack = nullptr;
state.highest_priority_thread = nullptr;
}
KScheduler::~KScheduler() = default;
Thread* KScheduler::GetCurrentThread() const {
if (current_thread) {
return current_thread;
KThread* KScheduler::GetCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
}
return idle_thread;
}
@@ -613,7 +638,7 @@ void KScheduler::RescheduleCurrentCore() {
phys_core.ClearInterrupt();
}
guard.lock();
if (this->state.needs_scheduling) {
if (state.needs_scheduling.load()) {
Schedule();
} else {
guard.unlock();
@@ -624,67 +649,76 @@ void KScheduler::OnThreadStart() {
SwitchContextStep2();
}
void KScheduler::Unload(Thread* thread) {
void KScheduler::Unload(KThread* thread) {
LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
if (thread) {
thread->SetIsRunning(false);
if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
if (thread->IsCallingSvc()) {
system.ArmInterface(core_id).ExceptionalExit();
thread->SetContinuousOnSVC(false);
thread->ClearIsCallingSvc();
}
if (!thread->IsHLEThread() && !thread->HasExited()) {
if (!thread->IsTerminationRequested()) {
prev_thread = thread;
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
} else {
prev_thread = nullptr;
}
thread->context_guard.unlock();
}
}
void KScheduler::Reload(Thread* thread) {
if (thread) {
ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
"Thread must be runnable.");
void KScheduler::Reload(KThread* thread) {
LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
// Cancel any outstanding wakeup events for this thread
thread->SetIsRunning(true);
thread->SetWasRunning(false);
if (thread) {
ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
auto* const thread_owner_process = thread->GetOwnerProcess();
if (thread_owner_process != nullptr) {
system.Kernel().MakeCurrentProcess(thread_owner_process);
}
if (!thread->IsHLEThread()) {
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
cpu_core.SetTlsAddress(thread->GetTLSAddress());
cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
}
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
cpu_core.SetTlsAddress(thread->GetTLSAddress());
cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
}
}
void KScheduler::SwitchContextStep2() {
// Load context of new thread
Reload(current_thread);
Reload(current_thread.load());
RescheduleCurrentCore();
}
void KScheduler::ScheduleImpl() {
Thread* previous_thread = current_thread;
current_thread = state.highest_priority_thread;
KThread* previous_thread = current_thread.load();
KThread* next_thread = state.highest_priority_thread;
this->state.needs_scheduling = false;
state.needs_scheduling = false;
if (current_thread == previous_thread) {
// We never want to schedule a null thread, so use the idle thread if we don't have a next.
if (next_thread == nullptr) {
next_thread = idle_thread;
}
// If we're not actually switching thread, there's nothing to do.
if (next_thread == current_thread.load()) {
guard.unlock();
return;
}
current_thread.store(next_thread);
Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -715,28 +749,29 @@ void KScheduler::SwitchToCurrent() {
while (true) {
{
std::scoped_lock lock{guard};
current_thread = state.highest_priority_thread;
this->state.needs_scheduling = false;
current_thread.store(state.highest_priority_thread);
state.needs_scheduling.store(false);
}
const auto is_switch_pending = [this] {
std::scoped_lock lock{guard};
return state.needs_scheduling.load(std::memory_order_relaxed);
return state.needs_scheduling.load();
};
do {
if (current_thread != nullptr && !current_thread->IsHLEThread()) {
current_thread->context_guard.lock();
if (!current_thread->IsRunnable()) {
current_thread->context_guard.unlock();
auto next_thread = current_thread.load();
if (next_thread != nullptr) {
next_thread->context_guard.lock();
if (next_thread->GetRawState() != ThreadState::Runnable) {
next_thread->context_guard.unlock();
break;
}
if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
current_thread->context_guard.unlock();
if (next_thread->GetActiveCore() != core_id) {
next_thread->context_guard.unlock();
break;
}
}
std::shared_ptr<Common::Fiber>* next_context;
if (current_thread != nullptr) {
next_context = &current_thread->GetHostContext();
if (next_thread != nullptr) {
next_context = &next_thread->GetHostContext();
} else {
next_context = &idle_thread->GetHostContext();
}
@@ -745,13 +780,13 @@ void KScheduler::SwitchToCurrent() {
}
}
void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
const u64 prev_switch_ticks = last_context_switch_time;
const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
if (thread != nullptr) {
thread->UpdateCPUTimeTicks(update_ticks);
thread->AddCpuTime(core_id, update_ticks);
}
if (process != nullptr) {
@@ -765,15 +800,10 @@ void KScheduler::Initialize() {
std::string name = "Idle Thread Id:" + std::to_string(core_id);
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
nullptr, std::move(init_func), init_func_parameter);
auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
nullptr, std::move(init_func), init_func_parameter);
idle_thread = thread_res.Unwrap().get();
{
KScopedSchedulerLock lock{system.Kernel()};
idle_thread->SetStatus(ThreadStatus::Ready);
}
}
KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)

View File

@@ -29,29 +29,33 @@ namespace Kernel {
class KernelCore;
class Process;
class SchedulerLock;
class Thread;
class KThread;
class KScheduler final {
public:
explicit KScheduler(Core::System& system, std::size_t core_id);
explicit KScheduler(Core::System& system, s32 core_id);
~KScheduler();
/// Reschedules to the next available thread (call after current thread is suspended)
void RescheduleCurrentCore();
/// Reschedules cores pending reschedule; meant to be called from EnableScheduling.
static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
Core::EmuThreadHandle global_thread);
static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);
/// The next two are for SingleCore Only.
/// Unload current thread before preempting core.
void Unload(Thread* thread);
void Unload(KThread* thread);
/// Reload current thread after core preemption.
void Reload(Thread* thread);
void Reload(KThread* thread);
/// Gets the current running thread
[[nodiscard]] Thread* GetCurrentThread() const;
[[nodiscard]] KThread* GetCurrentThread() const;
/// Returns true if the scheduler is idle
[[nodiscard]] bool IsIdle() const {
return GetCurrentThread() == idle_thread;
}
/// Gets the timestamp for the last context switch in ticks.
[[nodiscard]] u64 GetLastContextSwitchTicks() const;
@@ -72,14 +76,14 @@ public:
return switch_fiber;
}
[[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
[[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
/**
* Takes a thread and moves it to the back of its priority list.
*
* @note This operation can be redundant; if so, no scheduling change occurs.
*/
void YieldWithoutCoreMigration();
static void YieldWithoutCoreMigration(KernelCore& kernel);
/**
* Takes a thread and moves it to the back of its priority list.
@@ -88,7 +92,7 @@ public:
*
* @note This operation can be redundant; if so, no scheduling change occurs.
*/
void YieldWithCoreMigration();
static void YieldWithCoreMigration(KernelCore& kernel);
/**
* Takes a thread and moves it out of the scheduling queue.
@@ -97,17 +101,18 @@ public:
*
* @note This operation can be redundant; if so, no scheduling change occurs.
*/
void YieldToAnyThread();
static void YieldToAnyThread(KernelCore& kernel);
static void ClearPreviousThread(KernelCore& kernel, KThread* thread);
/// Notify the scheduler a thread's status has changed.
static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
/// Notify the scheduler a thread's priority has changed.
static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
u32 old_priority);
static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);
/// Notify the scheduler a thread's core and/or affinity mask has changed.
static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core);
static bool CanSchedule(KernelCore& kernel);
@@ -115,8 +120,7 @@ public:
static void SetSchedulerUpdateNeeded(KernelCore& kernel);
static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
static void DisableScheduling(KernelCore& kernel);
static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
Core::EmuThreadHandle global_thread);
static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
[[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
private:
@@ -164,13 +168,15 @@ private:
* most recent tick count retrieved. No special arithmetic is
* applied to it.
*/
void UpdateLastContextSwitchTime(Thread* thread, Process* process);
void UpdateLastContextSwitchTime(KThread* thread, Process* process);
static void OnSwitch(void* this_scheduler);
void SwitchToCurrent();
Thread* current_thread{};
Thread* idle_thread{};
KThread* prev_thread{};
std::atomic<KThread*> current_thread{};
KThread* idle_thread;
std::shared_ptr<Common::Fiber> switch_fiber{};
@@ -179,7 +185,7 @@ private:
bool interrupt_task_thread_runnable{};
bool should_count_idle{};
u64 idle_count{};
Thread* highest_priority_thread{};
KThread* highest_priority_thread{};
void* idle_thread_stack{};
};
@@ -187,7 +193,7 @@ private:
Core::System& system;
u64 last_context_switch_time{};
const std::size_t core_id;
const s32 core_id;
Common::SpinLock guard{};
};
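
Since the yield entry points are now static, callers pass the kernel explicitly. A hedged sketch of a dispatcher over svcSleepThread's special values (hypothetical function; the 0 / -1 / -2 mapping mirrors yuzu's SleepType constants):

void DispatchSleep(KernelCore& kernel, s64 ns) {
    if (ns == 0) {
        KScheduler::YieldWithoutCoreMigration(kernel);
    } else if (ns == -1) {
        KScheduler::YieldWithCoreMigration(kernel);
    } else if (ns == -2) {
        KScheduler::YieldToAnyThread(kernel);
    }
    // positive timeouts would schedule an actual sleep instead
}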

View File

@@ -10,6 +10,7 @@
#include "common/assert.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
@@ -19,49 +20,48 @@ class KernelCore;
template <typename SchedulerType>
class KAbstractSchedulerLock {
public:
explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}
explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
bool IsLockedByCurrentThread() const {
return this->owner_thread == kernel.GetCurrentEmuThreadID();
return owner_thread == GetCurrentThreadPointer(kernel);
}
void Lock() {
if (this->IsLockedByCurrentThread()) {
if (IsLockedByCurrentThread()) {
// If we already own the lock, we can just increment the count.
ASSERT(this->lock_count > 0);
this->lock_count++;
ASSERT(lock_count > 0);
lock_count++;
} else {
// Otherwise, we want to disable scheduling and acquire the spinlock.
SchedulerType::DisableScheduling(kernel);
this->spin_lock.lock();
spin_lock.lock();
// For debug, ensure that our state is valid.
ASSERT(this->lock_count == 0);
ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
ASSERT(lock_count == 0);
ASSERT(owner_thread == nullptr);
// Increment count, take ownership.
this->lock_count = 1;
this->owner_thread = kernel.GetCurrentEmuThreadID();
lock_count = 1;
owner_thread = GetCurrentThreadPointer(kernel);
}
}
void Unlock() {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(this->lock_count > 0);
ASSERT(IsLockedByCurrentThread());
ASSERT(lock_count > 0);
// Release an instance of the lock.
if ((--this->lock_count) == 0) {
if ((--lock_count) == 0) {
// We're no longer going to hold the lock. Take note of what cores need scheduling.
const u64 cores_needing_scheduling =
SchedulerType::UpdateHighestPriorityThreads(kernel);
Core::EmuThreadHandle leaving_thread = owner_thread;
// Note that we no longer hold the lock, and unlock the spinlock.
this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
this->spin_lock.unlock();
owner_thread = nullptr;
spin_lock.unlock();
// Enable scheduling, and perform a rescheduling operation.
SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
}
}
@@ -69,7 +69,7 @@ private:
KernelCore& kernel;
Common::SpinLock spin_lock{};
s32 lock_count{};
Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()};
KThread* owner_thread{};
};
} // namespace Kernel
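
KScopedSchedulerLock, used throughout the call sites above, is the RAII wrapper over this lock. The reentrancy contract in a short sketch: only the outermost Lock/Unlock pair toggles scheduling, so nested acquisition from the same thread is just a counter increment:

void Inner(KernelCore& kernel);

void Outer(KernelCore& kernel) {
    KScopedSchedulerLock lk{kernel}; // DisableScheduling + spin_lock, lock_count = 1
    Inner(kernel);                   // nesting from the owning thread is fine
}                                    // count reaches 0: update threads, unlock, reschedule

void Inner(KernelCore& kernel) {
    KScopedSchedulerLock lk{kernel}; // same owner_thread: just lock_count++
}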

View File

@@ -9,27 +9,24 @@
#include "common/common_types.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
namespace Kernel {
class KScopedSchedulerLockAndSleep {
public:
explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
s64 timeout)
: kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
event_handle = InvalidHandle;
explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
: kernel(kernel), thread(t), timeout_tick(timeout) {
// Lock the scheduler.
kernel.GlobalSchedulerContext().scheduler_lock.Lock();
}
~KScopedSchedulerLockAndSleep() {
// Register the sleep.
if (this->timeout_tick > 0) {
kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
if (timeout_tick > 0) {
kernel.TimeManager().ScheduleTimeEvent(thread, timeout_tick);
}
// Unlock the scheduler.
@@ -37,13 +34,12 @@ public:
}
void CancelSleep() {
this->timeout_tick = 0;
timeout_tick = 0;
}
private:
KernelCore& kernel;
Handle& event_handle;
Thread* thread{};
KThread* thread{};
s64 timeout_tick{};
};
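With the event handle gone from this guard, the destructor alone registers the sleep: it schedules the time event only while timeout_tick is still positive, then unlocks. A hedged sketch of the intended pattern follows; SleepPatternExample and WaitIsAlreadySatisfied are placeholders, not part of the change:
bool WaitIsAlreadySatisfied(); // Placeholder predicate, assumed defined elsewhere.
void SleepPatternExample(KernelCore& kernel, KThread* current, s64 timeout_ticks) {
    {
        KScopedSchedulerLockAndSleep slp{kernel, current, timeout_ticks};
        if (WaitIsAlreadySatisfied()) {
            slp.CancelSleep(); // Zeroes timeout_tick, so no time event gets scheduled.
            return;
        }
        current->SetState(ThreadState::Waiting);
    } // Destructor runs here: registers the timeout (if any) and unlocks the scheduler.
    // Execution resumes once the thread is signaled or the timeout fires.
}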

View File

@@ -0,0 +1,171 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
KSynchronizationObject** objects, const s32 num_objects,
s64 timeout) {
// Allocate space for the thread nodes (a std::vector here, not the stack).
std::vector<ThreadListNode> thread_nodes(num_objects);
// Prepare for wait.
KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();
{
// Setup the scheduling lock and sleep.
KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};
// Check if any of the objects are already signaled.
for (auto i = 0; i < num_objects; ++i) {
ASSERT(objects[i] != nullptr);
if (objects[i]->IsSignaled()) {
*out_index = i;
slp.CancelSleep();
return RESULT_SUCCESS;
}
}
// Check if the timeout is zero.
if (timeout == 0) {
slp.CancelSleep();
return Svc::ResultTimedOut;
}
// Check if the thread should terminate.
if (thread->IsTerminationRequested()) {
slp.CancelSleep();
return Svc::ResultTerminationRequested;
}
// Check if waiting was canceled.
if (thread->IsWaitCancelled()) {
slp.CancelSleep();
thread->ClearWaitCancelled();
return Svc::ResultCancelled;
}
// Add the waiters.
for (auto i = 0; i < num_objects; ++i) {
thread_nodes[i].thread = thread;
thread_nodes[i].next = nullptr;
if (objects[i]->thread_list_tail == nullptr) {
objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
} else {
objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
}
objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
}
// For debugging only
thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
// Mark the thread as waiting.
thread->SetCancellable();
thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
thread->SetState(ThreadState::Waiting);
thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
}
// The lock/sleep is done, so we should be able to get our result.
// Thread is no longer cancellable.
thread->ClearCancellable();
// For debugging only
thread->SetWaitObjectsForDebugging({});
// Cancel the timer as needed.
kernel.TimeManager().UnscheduleTimeEvent(thread);
// Get the wait result.
ResultCode wait_result{RESULT_SUCCESS};
s32 sync_index = -1;
{
KScopedSchedulerLock lock(kernel);
KSynchronizationObject* synced_obj;
wait_result = thread->GetWaitResult(std::addressof(synced_obj));
for (auto i = 0; i < num_objects; ++i) {
// Unlink the object from the list.
ThreadListNode* prev_ptr =
reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
ThreadListNode* prev_val = nullptr;
ThreadListNode *prev, *tail_prev;
do {
prev = prev_ptr;
prev_ptr = prev_ptr->next;
tail_prev = prev_val;
prev_val = prev_ptr;
} while (prev_ptr != std::addressof(thread_nodes[i]));
if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
objects[i]->thread_list_tail = tail_prev;
}
prev->next = thread_nodes[i].next;
if (objects[i] == synced_obj) {
sync_index = i;
}
}
}
// Set output.
*out_index = sync_index;
return wait_result;
}
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name)
: Object{kernel, std::move(name)} {}
KSynchronizationObject::~KSynchronizationObject() = default;
void KSynchronizationObject::NotifyAvailable(ResultCode result) {
KScopedSchedulerLock lock(kernel);
// If we're not signaled, we've nothing to notify.
if (!this->IsSignaled()) {
return;
}
// Iterate over each thread.
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
KThread* thread = cur_node->thread;
if (thread->GetState() == ThreadState::Waiting) {
thread->SetSyncedObject(this, result);
thread->SetState(ThreadState::Runnable);
}
}
}
std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
std::vector<KThread*> threads;
// Gather the list of waiters under the scheduler lock.
{
KScopedSchedulerLock lock(kernel);
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
threads.emplace_back(cur_node->thread);
}
}
return threads;
}
} // namespace Kernel
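A hypothetical caller sketch for the new Wait entry point, mirroring what a WaitSynchronization SVC handler would do once it has resolved handles to objects (WaitExample and the two-object setup are illustrative; assumes <array> is included):
ResultCode WaitExample(KernelCore& kernel, KSynchronizationObject* first,
                       KSynchronizationObject* second) {
    std::array<KSynchronizationObject*, 2> objects{first, second};
    s32 index = -1;
    const ResultCode result = KSynchronizationObject::Wait(
        kernel, &index, objects.data(), static_cast<s32>(objects.size()),
        -1); // A negative timeout waits forever; 0 polls and may return ResultTimedOut.
    if (result == RESULT_SUCCESS) {
        // objects[index] is the object that signaled.
    }
    return result;
}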

View File

@@ -0,0 +1,59 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include "core/hle/kernel/object.h"
#include "core/hle/result.h"
namespace Kernel {
class KernelCore;
class Synchronization;
class KThread;
/// Class that represents a Kernel object that a thread can be waiting on
class KSynchronizationObject : public Object {
public:
struct ThreadListNode {
ThreadListNode* next{};
KThread* thread{};
};
[[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
KSynchronizationObject** objects, const s32 num_objects,
s64 timeout);
[[nodiscard]] virtual bool IsSignaled() const = 0;
[[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
protected:
explicit KSynchronizationObject(KernelCore& kernel);
explicit KSynchronizationObject(KernelCore& kernel, std::string&& name);
virtual ~KSynchronizationObject();
void NotifyAvailable(ResultCode result);
void NotifyAvailable() {
return this->NotifyAvailable(RESULT_SUCCESS);
}
private:
ThreadListNode* thread_list_head{};
ThreadListNode* thread_list_tail{};
};
// Specialization of DynamicObjectCast for KSynchronizationObjects
template <>
inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
std::shared_ptr<Object> object) {
if (object != nullptr && object->IsWaitable()) {
return std::static_pointer_cast<KSynchronizationObject>(object);
}
return nullptr;
}
} // namespace Kernel
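For context, a minimal hypothetical subclass showing the protected contract: a derived object reports its state through IsSignaled() and calls NotifyAvailable() after changing it (KEvent and KReadableEvent are the real users introduced by this PR; Object boilerplate such as GetTypeName/GetHandleType is omitted for brevity):
class ExampleSignal final : public KSynchronizationObject {
public:
    explicit ExampleSignal(KernelCore& kernel) : KSynchronizationObject{kernel} {}
    bool IsSignaled() const override {
        return signaled;
    }
    void Signal() {
        signaled = true;
        NotifyAvailable(); // Walks the waiter list, waking threads in ThreadState::Waiting.
    }
private:
    bool signaled{};
};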

File diff suppressed because it is too large.

View File

@@ -0,0 +1,768 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <span>
#include <string>
#include <utility>
#include <vector>
#include <boost/intrusive/list.hpp>
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
namespace Common {
class Fiber;
}
namespace Core {
class ARM_Interface;
class System;
} // namespace Core
namespace Kernel {
class GlobalSchedulerContext;
class KernelCore;
class Process;
class KScheduler;
class KThreadQueue;
using KThreadFunction = VAddr;
enum class ThreadType : u32 {
Main = 0,
Kernel = 1,
HighPriority = 2,
User = 3,
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
enum class SuspendType : u32 {
Process = 0,
Thread = 1,
Debug = 2,
Backtrace = 3,
Init = 4,
Count,
};
enum class ThreadState : u16 {
Initialized = 0,
Waiting = 1,
Runnable = 2,
Terminated = 3,
SuspendShift = 4,
Mask = (1 << SuspendShift) - 1,
ProcessSuspended = (1 << (0 + SuspendShift)),
ThreadSuspended = (1 << (1 + SuspendShift)),
DebugSuspended = (1 << (2 + SuspendShift)),
BacktraceSuspended = (1 << (3 + SuspendShift)),
InitSuspended = (1 << (4 + SuspendShift)),
SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
enum class DpcFlag : u32 {
Terminating = (1 << 0),
Terminated = (1 << 1),
};
enum class ThreadWaitReasonForDebugging : u32 {
None, ///< Thread is not waiting
Sleep, ///< Thread is waiting due to a SleepThread SVC
IPC, ///< Thread is waiting for the reply from an IPC request
Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
ConditionVar, ///< Thread is waiting due to a WaitProcessWideKey SVC
Arbitration, ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
Suspended, ///< Thread is waiting due to process suspension
};
[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
friend class KScheduler;
friend class Process;
public:
static constexpr s32 DefaultThreadPriority = 44;
static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
explicit KThread(KernelCore& kernel);
~KThread() override;
public:
using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
using WaiterList = boost::intrusive::list<KThread>;
/**
* Creates and returns a new thread. The new thread is immediately scheduled.
* @param system The instance of the whole system
* @param type_flags The type of the new thread
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread; if null, it's a kernel thread
* @return A shared pointer to the newly created thread
*/
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
/**
* Creates and returns a new thread. The new thread is immediately scheduled.
* @param system The instance of the whole system
* @param type_flags The type of the new thread
* @param name The friendly name desired for the thread
* @param entry_point The address at which the thread should start execution
* @param priority The thread's priority
* @param arg User data to pass to the thread
* @param processor_id The ID(s) of the processors on which the thread is desired to be run
* @param stack_top The address of the thread's stack top
* @param owner_process The parent process for the thread; if null, it's a kernel thread
* @param thread_start_func The function where the host context will start.
* @param thread_start_parameter The parameter which will be passed to the host context on init
* @return A shared pointer to the newly created thread
*/
[[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
[[nodiscard]] std::string GetName() const override {
return name;
}
void SetName(std::string new_name) {
name = std::move(new_name);
}
[[nodiscard]] std::string GetTypeName() const override {
return "Thread";
}
static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
[[nodiscard]] HandleType GetHandleType() const override {
return HANDLE_TYPE;
}
/**
* Gets the thread's current priority
* @return The current thread's priority
*/
[[nodiscard]] s32 GetPriority() const {
return priority;
}
/**
* Sets the thread's current priority.
* @param value The new priority.
*/
void SetPriority(s32 value) {
priority = value;
}
/**
* Gets the thread's nominal priority.
* @return The current thread's nominal priority.
*/
[[nodiscard]] s32 GetBasePriority() const {
return base_priority;
}
/**
* Gets the thread's thread ID
* @return The thread's ID
*/
[[nodiscard]] u64 GetThreadID() const {
return thread_id;
}
void ContinueIfHasKernelWaiters() {
if (GetNumKernelWaiters() > 0) {
Continue();
}
}
void Wakeup();
void SetBasePriority(s32 value);
[[nodiscard]] ResultCode Run();
void Exit();
[[nodiscard]] u32 GetSuspendFlags() const {
return suspend_allowed_flags & suspend_request_flags;
}
[[nodiscard]] bool IsSuspended() const {
return GetSuspendFlags() != 0;
}
[[nodiscard]] bool IsSuspendRequested(SuspendType type) const {
return (suspend_request_flags &
(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
0;
}
[[nodiscard]] bool IsSuspendRequested() const {
return suspend_request_flags != 0;
}
void RequestSuspend(SuspendType type);
void Resume(SuspendType type);
void TrySuspend();
void Continue();
void Suspend();
void Finalize() override;
bool IsSignaled() const override;
void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
synced_object = obj;
wait_result = wait_res;
}
[[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
*out = synced_object;
return wait_result;
}
/*
* Returns the Thread Local Storage address of this thread
* @returns VAddr of the thread's TLS
*/
[[nodiscard]] VAddr GetTLSAddress() const {
return tls_address;
}
/*
* Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
* @returns The value of the TPIDR_EL0 register.
*/
[[nodiscard]] u64 GetTPIDR_EL0() const {
return thread_context_64.tpidr;
}
/// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
void SetTPIDR_EL0(u64 value) {
thread_context_64.tpidr = value;
thread_context_32.tpidr = static_cast<u32>(value);
}
[[nodiscard]] ThreadContext32& GetContext32() {
return thread_context_32;
}
[[nodiscard]] const ThreadContext32& GetContext32() const {
return thread_context_32;
}
[[nodiscard]] ThreadContext64& GetContext64() {
return thread_context_64;
}
[[nodiscard]] const ThreadContext64& GetContext64() const {
return thread_context_64;
}
[[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
[[nodiscard]] ThreadState GetState() const {
return thread_state & ThreadState::Mask;
}
[[nodiscard]] ThreadState GetRawState() const {
return thread_state;
}
void SetState(ThreadState state);
[[nodiscard]] s64 GetLastScheduledTick() const {
return last_scheduled_tick;
}
void SetLastScheduledTick(s64 tick) {
last_scheduled_tick = tick;
}
void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) {
cpu_time += amount;
// TODO(bunnei): Debug kernels track per-core tick counts. Should we?
}
[[nodiscard]] s64 GetCpuTime() const {
return cpu_time;
}
[[nodiscard]] s32 GetActiveCore() const {
return core_id;
}
void SetActiveCore(s32 core) {
core_id = core;
}
[[nodiscard]] s32 GetCurrentCore() const {
return current_core_id;
}
void SetCurrentCore(s32 core) {
current_core_id = core;
}
[[nodiscard]] Process* GetOwnerProcess() {
return parent;
}
[[nodiscard]] const Process* GetOwnerProcess() const {
return parent;
}
[[nodiscard]] bool IsUserThread() const {
return parent != nullptr;
}
[[nodiscard]] KThread* GetLockOwner() const {
return lock_owner;
}
void SetLockOwner(KThread* owner) {
lock_owner = owner;
}
[[nodiscard]] const KAffinityMask& GetAffinityMask() const {
return physical_affinity_mask;
}
[[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
[[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
[[nodiscard]] ResultCode SetCoreMask(s32 core_id, u64 v_affinity_mask);
[[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
[[nodiscard]] ResultCode Sleep(s64 timeout);
[[nodiscard]] s64 GetYieldScheduleCount() const {
return schedule_count;
}
void SetYieldScheduleCount(s64 count) {
schedule_count = count;
}
void WaitCancel();
[[nodiscard]] bool IsWaitCancelled() const {
return wait_cancelled;
}
void ClearWaitCancelled() {
wait_cancelled = false;
}
[[nodiscard]] bool IsCancellable() const {
return cancellable;
}
void SetCancellable() {
cancellable = true;
}
void ClearCancellable() {
cancellable = false;
}
[[nodiscard]] bool IsTerminationRequested() const {
return termination_requested || GetRawState() == ThreadState::Terminated;
}
struct StackParameters {
u8 svc_permission[0x10];
std::atomic<u8> dpc_flags;
u8 current_svc_id;
bool is_calling_svc;
bool is_in_exception_handler;
bool is_pinned;
s32 disable_count;
KThread* cur_thread;
};
[[nodiscard]] StackParameters& GetStackParameters() {
return stack_parameters;
}
[[nodiscard]] const StackParameters& GetStackParameters() const {
return stack_parameters;
}
class QueueEntry {
public:
constexpr QueueEntry() = default;
constexpr void Initialize() {
prev = nullptr;
next = nullptr;
}
constexpr KThread* GetPrev() const {
return prev;
}
constexpr KThread* GetNext() const {
return next;
}
constexpr void SetPrev(KThread* thread) {
prev = thread;
}
constexpr void SetNext(KThread* thread) {
next = thread;
}
private:
KThread* prev{};
KThread* next{};
};
[[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) {
return per_core_priority_queue_entry[core];
}
[[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const {
return per_core_priority_queue_entry[core];
}
void SetSleepingQueue(KThreadQueue* q) {
sleeping_queue = q;
}
[[nodiscard]] s32 GetDisableDispatchCount() const {
return this->GetStackParameters().disable_count;
}
void DisableDispatch() {
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++;
}
void EnableDispatch() {
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--;
}
void Pin();
void Unpin();
void SetInExceptionHandler() {
this->GetStackParameters().is_in_exception_handler = true;
}
void ClearInExceptionHandler() {
this->GetStackParameters().is_in_exception_handler = false;
}
[[nodiscard]] bool IsInExceptionHandler() const {
return this->GetStackParameters().is_in_exception_handler;
}
void SetIsCallingSvc() {
this->GetStackParameters().is_calling_svc = true;
}
void ClearIsCallingSvc() {
this->GetStackParameters().is_calling_svc = false;
}
[[nodiscard]] bool IsCallingSvc() const {
return this->GetStackParameters().is_calling_svc;
}
[[nodiscard]] u8 GetSvcId() const {
return this->GetStackParameters().current_svc_id;
}
void RegisterDpc(DpcFlag flag) {
this->GetStackParameters().dpc_flags |= static_cast<u8>(flag);
}
void ClearDpc(DpcFlag flag) {
this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
}
[[nodiscard]] u8 GetDpc() const {
return this->GetStackParameters().dpc_flags;
}
[[nodiscard]] bool HasDpc() const {
return this->GetDpc() != 0;
}
void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
wait_reason_for_debugging = reason;
}
[[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
return wait_reason_for_debugging;
}
[[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
return thread_type_for_debugging;
}
void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
wait_objects_for_debugging.clear();
wait_objects_for_debugging.reserve(objects.size());
for (const auto& object : objects) {
wait_objects_for_debugging.emplace_back(object);
}
}
[[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
return wait_objects_for_debugging;
}
void SetMutexWaitAddressForDebugging(VAddr address) {
mutex_wait_address_for_debugging = address;
}
[[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
return mutex_wait_address_for_debugging;
}
[[nodiscard]] s32 GetIdealCoreForDebugging() const {
return virtual_ideal_core_id;
}
void AddWaiter(KThread* thread);
void RemoveWaiter(KThread* thread);
[[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
[[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
[[nodiscard]] VAddr GetAddressKey() const {
return address_key;
}
[[nodiscard]] u32 GetAddressKeyValue() const {
return address_key_value;
}
void SetAddressKey(VAddr key) {
address_key = key;
}
void SetAddressKey(VAddr key, u32 val) {
address_key = key;
address_key_value = val;
}
[[nodiscard]] bool HasWaiters() const {
return !waiter_list.empty();
}
[[nodiscard]] s32 GetNumKernelWaiters() const {
return num_kernel_waiters;
}
[[nodiscard]] u64 GetConditionVariableKey() const {
return condvar_key;
}
[[nodiscard]] u64 GetAddressArbiterKey() const {
return condvar_key;
}
private:
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
std::array<Handle,
Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
handles;
constexpr SyncObjectBuffer() {}
};
static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
struct ConditionVariableComparator {
struct LightCompareType {
u64 cv_key{};
s32 priority{};
[[nodiscard]] constexpr u64 GetConditionVariableKey() const {
return cv_key;
}
[[nodiscard]] constexpr s32 GetPriority() const {
return priority;
}
};
template <typename T>
requires(std::same_as<T, KThread> || std::same_as<T, LightCompareType>)
static constexpr int Compare(const T& lhs, const KThread& rhs) {
const u64 l_key = lhs.GetConditionVariableKey();
const u64 r_key = rhs.GetConditionVariableKey();
if (l_key < r_key) {
// Sort first by key
return -1;
} else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
// And then by priority.
return -1;
} else {
return 1;
}
}
};
void AddWaiterImpl(KThread* thread);
void RemoveWaiterImpl(KThread* thread);
void StartTermination();
[[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
s32 prio, s32 virt_core, Process* owner, ThreadType type);
[[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 core, Process* owner, ThreadType type);
static void RestorePriority(KernelCore& kernel, KThread* thread);
// For core KThread implementation
ThreadContext32 thread_context_32{};
ThreadContext64 thread_context_64{};
Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
s32 priority{};
using ConditionVariableThreadTreeTraits =
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
&KThread::condvar_arbiter_tree_node>;
using ConditionVariableThreadTree =
ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
ConditionVariableThreadTree* condvar_tree{};
u64 condvar_key{};
u64 virtual_affinity_mask{};
KAffinityMask physical_affinity_mask{};
u64 thread_id{};
std::atomic<s64> cpu_time{};
KSynchronizationObject* synced_object{};
VAddr address_key{};
Process* parent{};
VAddr kernel_stack_top{};
u32* light_ipc_data{};
VAddr tls_address{};
KLightLock activity_pause_lock;
s64 schedule_count{};
s64 last_scheduled_tick{};
std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
KThreadQueue* sleeping_queue{};
WaiterList waiter_list{};
WaiterList pinned_waiter_list{};
KThread* lock_owner{};
u32 address_key_value{};
u32 suspend_request_flags{};
u32 suspend_allowed_flags{};
ResultCode wait_result{RESULT_SUCCESS};
s32 base_priority{};
s32 physical_ideal_core_id{};
s32 virtual_ideal_core_id{};
s32 num_kernel_waiters{};
s32 current_core_id{};
s32 core_id{};
KAffinityMask original_physical_affinity_mask{};
s32 original_physical_ideal_core_id{};
s32 num_core_migration_disables{};
ThreadState thread_state{};
std::atomic<bool> termination_requested{};
bool wait_cancelled{};
bool cancellable{};
bool signaled{};
bool initialized{};
bool debug_attached{};
s8 priority_inheritance_count{};
bool resource_limit_release_hint{};
StackParameters stack_parameters{};
Common::SpinLock context_guard{};
// For emulation
std::shared_ptr<Common::Fiber> host_context{};
// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
VAddr mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging wait_reason_for_debugging{};
ThreadType thread_type_for_debugging{};
std::string name;
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
u32 value) {
condvar_tree = tree;
condvar_key = cv_key;
address_key = address;
address_key_value = value;
}
void ClearConditionVariable() {
condvar_tree = nullptr;
}
[[nodiscard]] bool IsWaitingForConditionVariable() const {
return condvar_tree != nullptr;
}
void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
condvar_tree = tree;
condvar_key = address;
}
void ClearAddressArbiter() {
condvar_tree = nullptr;
}
[[nodiscard]] bool IsWaitingForAddressArbiter() const {
return condvar_tree != nullptr;
}
[[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
return condvar_tree;
}
};
} // namespace Kernel
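The ThreadState layout above packs the scheduling state into the low four bits and one suspend flag per SuspendType into bits 4 through 8; GetState() masks the former out of the raw value. A standalone worked example of that bit arithmetic (the constants are copied from the enum; the program itself is illustrative):
#include <cassert>
#include <cstdint>
int main() {
    constexpr std::uint16_t suspend_shift = 4;                          // ThreadState::SuspendShift
    constexpr std::uint16_t mask = (1 << suspend_shift) - 1;            // ThreadState::Mask, 0x000F
    constexpr std::uint16_t runnable = 2;                               // ThreadState::Runnable
    constexpr std::uint16_t debug_suspended = 1 << (2 + suspend_shift); // ThreadState::DebugSuspended
    // A runnable thread that a debugger has suspended:
    const std::uint16_t raw_state = runnable | debug_suspended; // 0x0042
    assert((raw_state & mask) == runnable);     // GetState() ignores the suspend bits.
    assert((raw_state & debug_suspended) != 0); // IsSuspendRequested(SuspendType::Debug).
    return 0;
}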

View File

@@ -0,0 +1,81 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/kernel/k_thread.h"
namespace Kernel {
class KThreadQueue {
public:
explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
bool IsEmpty() const {
return wait_list.empty();
}
KThread::WaiterList::iterator begin() {
return wait_list.begin();
}
KThread::WaiterList::iterator end() {
return wait_list.end();
}
bool SleepThread(KThread* t) {
KScopedSchedulerLock sl{kernel};
// If the thread needs terminating, don't enqueue it.
if (t->IsTerminationRequested()) {
return false;
}
// Set the thread's queue and mark it as waiting.
t->SetSleepingQueue(this);
t->SetState(ThreadState::Waiting);
// Add the thread to the queue.
wait_list.push_back(*t);
return true;
}
void WakeupThread(KThread* t) {
KScopedSchedulerLock sl{kernel};
// Remove the thread from the queue.
wait_list.erase(wait_list.iterator_to(*t));
// Mark the thread as no longer sleeping.
t->SetState(ThreadState::Runnable);
t->SetSleepingQueue(nullptr);
}
KThread* WakeupFrontThread() {
KScopedSchedulerLock sl{kernel};
if (wait_list.empty()) {
return nullptr;
} else {
// Remove the thread from the queue.
auto it = wait_list.begin();
KThread* thread = std::addressof(*it);
wait_list.erase(it);
ASSERT(thread->GetState() == ThreadState::Waiting);
// Mark the thread as no longer sleeping.
thread->SetState(ThreadState::Runnable);
thread->SetSleepingQueue(nullptr);
return thread;
}
}
private:
KernelCore& kernel;
KThread::WaiterList wait_list{};
};
} // namespace Kernel
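A hedged sketch of the FIFO discipline this queue provides (Consumer and Producer are illustrative names): a waiter parks itself with SleepThread, and a signaller releases the oldest waiter first with WakeupFrontThread.
void Consumer(KernelCore& kernel, KThreadQueue& queue) {
    KThread* current = GetCurrentThreadPointer(kernel);
    if (queue.SleepThread(current)) {
        // current is now in ThreadState::Waiting and the scheduler switches away;
        // SleepThread instead returns false if termination was already requested.
    }
}
void Producer(KThreadQueue& queue) {
    // Wake every sleeper, oldest first; each call marks the thread Runnable.
    while (queue.WakeupFrontThread() != nullptr) {
    }
}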

Some files were not shown because too many files have changed in this diff.