Compare commits

...

211 Commits

Author SHA1 Message Date
yuzubot
40ae8fe96a "Merge Tagged PR 1012" 2019-11-03 13:01:27 +00:00
yuzubot
5bb2494306 "Merge Tagged PR 1340" 2019-11-03 13:01:25 +00:00
yuzubot
1527ea18f8 "Merge Tagged PR 1703" 2019-11-03 13:01:25 +00:00
yuzubot
c71844d224 "Merge Tagged PR 2365" 2019-11-03 13:01:24 +00:00
yuzubot
0ded864000 "Merge Tagged PR 2542" 2019-11-03 13:01:23 +00:00
yuzubot
d3069570ad "Merge Tagged PR 2859" 2019-11-03 13:01:22 +00:00
yuzubot
30d397f778 "Merge Tagged PR 2914" 2019-11-03 13:01:21 +00:00
yuzubot
07fdad8d81 "Merge Tagged PR 2945" 2019-11-03 13:01:20 +00:00
yuzubot
7162470cce "Merge Tagged PR 2987" 2019-11-03 13:01:19 +00:00
yuzubot
b801ad78e7 "Merge Tagged PR 3047" 2019-11-03 13:01:19 +00:00
yuzubot
decd0a012f "Merge Tagged PR 3057" 2019-11-03 13:01:18 +00:00
yuzubot
2a8d14285f "Merge Tagged PR 3062" 2019-11-03 13:01:17 +00:00
bunnei
ae6eb61892 Merge pull request #3059 from FearlessTobi/stub-am-commands
core/am: Stub InitializeApplicationCopyrightFrameBuffer, SetApplicationCopyrightImage and SetApplicationCopyrightVisibility
2019-11-03 05:26:33 -05:00
Mat M
2b4208254e Merge pull request #3060 from FearlessTobi/patch-1
common/bit_field: Remove FORCE_INLINE calls
2019-11-03 04:59:06 -05:00
bunnei
84887b0088 Merge pull request #3064 from yuzu-emu/revert-3063-zero-init-padding
Revert "common_func: Use std::array for INSERT_PADDING_* macros."
2019-11-03 04:40:04 -05:00
bunnei
4edf73344f Revert "common_func: Use std::array for INSERT_PADDING_* macros." 2019-11-03 04:39:51 -05:00
bunnei
8c1e38f744 Merge pull request #3063 from bunnei/zero-init-padding
common_func: Use std::array for INSERT_PADDING_* macros.
2019-11-03 04:35:12 -05:00
bunnei
fdc5791b08 common_func: Use std::array for INSERT_PADDING_* macros.
- Zero initialization here is useful for determinism.
2019-11-03 04:30:16 -05:00
Tobias
a81987a7cb common/bit_field: Remove FORCE_INLINE calls
See bunnei's comment here: https://github.com/citra-emu/citra/pull/4629#discussion_r258533167.
They were supposed to be removed by him, but he missed them.
2019-11-03 08:25:37 +01:00
FearlessTobi
832c138eff core/am: Stub InitializeApplicationCopyrightFrameBuffer, SetApplicationCopyrightImage and SetApplicationCopyrightVisibility
These commands require Screenshots to be implemented anyway, so they are safe to stub for now.
2019-11-03 07:37:38 +01:00
bunnei
bec7e3b7d9 Merge pull request #3058 from FearlessTobi/port-4948
Port citra-emu/citra#4948: "citra_qt: add amiibo drag and drop support"
2019-11-03 01:55:21 -04:00
FearlessTobi
727ba2f2d0 citra_qt: add amiibo drag and drop support
Co-Authored-By: Valentin Vanelslande <vvanelslandedev@gmail.com>
2019-11-03 05:24:47 +01:00
Rodrigo Locatti
11e39da02b Merge pull request #3054 from FernandoS27/fix-tld4-2
shader_ir: Fix regression on TLD4
2019-10-31 01:56:29 +00:00
Fernando Sahmkow
23cabc98db Shader_IR: Fix regression on TLD4
In the previous commit, I thought TLD4 acted the same as TLD4S and
didn't have a mask. It actually does have a component mask. This commit
corrects that.
2019-10-30 21:14:57 -04:00
Rodrigo Locatti
658489ebf7 Merge pull request #3050 from FernandoS27/fix-tld4
shader_ir: Fix TLD4 and add bindless variant
2019-10-30 18:37:17 +00:00
Fernando Sahmkow
9293c3a0f2 Shader_IR: Fix TLD4 and add Bindless Variant.
This commit fixes an issue where not all 4 results of tld4 were being
written and where the color component was defaulted to red, among other things.
It also implements the bindless variant.
2019-10-30 12:02:03 -04:00
Rodrigo Locatti
04b838c857 Merge pull request #3038 from lioncash/docs
kernel/scheduler: Minor changes
2019-10-30 03:47:28 +00:00
bunnei
2382bbe3ac Merge pull request #3046 from ReinUsesLisp/clean-gl-state
gl_state: Miscellaneous clean up
2019-10-29 22:50:04 -04:00
bunnei
b5138f3c35 Merge pull request #3035 from ReinUsesLisp/rasterizer-accelerated
rasterizer_accelerated: Add intermediary for GPU rasterizers
2019-10-29 22:06:41 -04:00
bunnei
a81bd962ab Merge pull request #3007 from DarkLordZach/fsc-regress
savedata_factory: Automatically create certain savedata
2019-10-29 22:05:09 -04:00
Rodrigo Locatti
3d0cde6a75 gl_state: Use std::array::fill instead of std::fill
Co-Authored-By: Mat M. <mathew1800@gmail.com>
2019-10-30 01:30:31 +00:00
ReinUsesLisp
ce20ed8e4e gl_state: Move dirty checks to individual apply calls instead of Apply
This requires removing constness from some methods, but for consistency
it's removed in all methods.
2019-10-29 21:27:25 -03:00
ReinUsesLisp
3c6557c235 gl_state: Remove ApplyDefaultState
OpenGL has default values we can trust. Remove these.
2019-10-29 21:27:25 -03:00
ReinUsesLisp
d3651b0b82 gl_state: Change SetDefaultViewports to use default constructor 2019-10-29 21:27:24 -03:00
ReinUsesLisp
c7698d0bc8 gl_state: Minor style changes 2019-10-29 21:27:24 -03:00
ReinUsesLisp
a14d202ac2 gl_state: Remove unused Citra TextureUnits 2019-10-29 21:27:24 -03:00
ReinUsesLisp
28fece8e9b gl_state: Move initializers from constructor to class declaration 2019-10-29 21:27:23 -03:00
Rodrigo Locatti
2ec5b55ee3 Merge pull request #3004 from ReinUsesLisp/maxwell3d-cleanup
maxwell_3d: Remove unused entries
2019-10-29 23:46:33 +00:00
Rodrigo Locatti
9f93ad08a5 Merge pull request #3023 from lioncash/opus
externals: Track upstream opus
2019-10-28 02:45:01 -03:00
Rodrigo Locatti
c5d9589942 Merge pull request #3037 from FernandoS27/new-formats
video_core: Implement texture format E5B9G9R9_SHAREDEXP.
2019-10-28 01:36:58 -03:00
Lioncash
6c8f28813c scheduler: Mark parameter of AskForReselectionOrMarkRedundant() as const
This is only compared against, so it can be made const.
2019-10-27 23:35:50 -04:00
ReinUsesLisp
fa31e5b868 maxwell_3d/kepler_compute: Remove unused arguments in GetTexture 2019-10-28 00:23:42 -03:00
ReinUsesLisp
538ddd220e video_core/textures: Remove unused index entry in FullTextureInfo 2019-10-28 00:14:38 -03:00
ReinUsesLisp
961fe4d19b maxwell_3d: Remove unused method GetStageTextures 2019-10-28 00:14:29 -03:00
Lioncash
f19c1a7cda scheduler: Silence sign conversion warnings 2019-10-27 22:44:52 -04:00
Lioncash
2fb0bbff29 scheduler: Initialize class members directly where applicable
Reduces the overall amount of code.
2019-10-27 22:13:55 -04:00
Lioncash
2dc469ceba scheduler: Amend documentation comments
Adjusts the formatting of a few of the comments and ensures they get
recognized as proper Doxygen comments.
2019-10-27 22:12:32 -04:00
David
4c5731c34f Merge pull request #2971 from FernandoS27/new-scheduler-v2
Kernel: Implement a New Thread Scheduler V2
2019-10-28 10:53:27 +11:00
Fernando Sahmkow
3f9262195b Video_Core: Implement texture format E5B9G9R9_SHAREDEXP.
This commit implements the E5B9G9R9 Texture format into the general 
system and OpenGL backend.
2019-10-27 16:44:09 -04:00
bunnei
6909b2f0f9 Merge pull request #3034 from ReinUsesLisp/w4244-maxwell3d
maxwell_3d: Silence implicit conversion warnings
2019-10-27 15:08:59 -04:00
ReinUsesLisp
3e469cecc1 maxwell_3d: Silence implicit conversion warnings
While we are at it, unify types for dirty reg pointers.
2019-10-27 15:22:17 -03:00
bunnei
7e2494e987 Merge pull request #3033 from ReinUsesLisp/w4244-astc
astc: Silence implicit conversion warnings
2019-10-27 14:09:53 -04:00
ReinUsesLisp
bd2aff3e26 rasterizer_accelerated: Add intermediary for GPU rasterizers
Add an intermediary class that implements common functions across GPU
accelerated rasterizers. This avoids code repetition on different
backends.
2019-10-27 03:40:08 -03:00
ReinUsesLisp
a5aa1bb174 astc: Silence implicit conversion warnings 2019-10-27 03:04:50 -03:00
Rodrigo Locatti
26f3e18c5c Merge pull request #2976 from FernandoS27/cache-fast-brx-rebased
Implement Fast BRX, fix TXQ and adapt the Shader Cache for it
2019-10-26 16:56:13 -03:00
Fernando Sahmkow
be856a38d6 Shader_IR: Address Feedback. 2019-10-26 15:38:30 -04:00
Rodrigo Locatti
a0d79085c4 Merge pull request #3027 from lioncash/lookup
shader_ir: Use std::array with std::pair instead of std::unordered_map
2019-10-26 05:49:15 -03:00
Rodrigo Locatti
d52598173d Merge pull request #3013 from FernandoS27/tld4s-fix
Shader_Ir: Fix TLD4S from using a component mask.
2019-10-25 20:06:26 -03:00
Fernando Sahmkow
e3afd6595a Shader_IR: Clang format 2019-10-25 09:01:32 -04:00
ReinUsesLisp
78f3e8a757 gl_shader_cache: Implement locker variants invalidation 2019-10-25 09:01:32 -04:00
ReinUsesLisp
ec85648af3 gl_shader_disk_cache: Store and load fast BRX 2019-10-25 09:01:31 -04:00
ReinUsesLisp
fa2c297f3e const_buffer_locker: Minor style changes 2019-10-25 09:01:31 -04:00
ReinUsesLisp
7b81ba4d8a gl_shader_decompiler: Move entries to a separate function 2019-10-25 09:01:31 -04:00
Fernando Sahmkow
1244f2d368 Shader_IR: Implement Fast BRX and allow multi-branches in the CFG. 2019-10-25 09:01:31 -04:00
Fernando Sahmkow
a05120ec0b Shader_IR: Correct typo in Consistent method. 2019-10-25 09:01:30 -04:00
Fernando Sahmkow
33fcec3502 Shader_IR: allow lookup of texture samplers within the shader_ir for instructions that don't provide it 2019-10-25 09:01:30 -04:00
Fernando Sahmkow
8909f52166 Shader_IR: Implement Fast BRX and allow multi-branches in the CFG. 2019-10-25 09:01:30 -04:00
Fernando Sahmkow
acd6441134 Shader_Cache: setup connection of ConstBufferLocker 2019-10-25 09:01:29 -04:00
Fernando Sahmkow
1a58f45d76 VideoCore: Unify const buffer accessing along engines and provide ConstBufferLocker class to shaders. 2019-10-25 09:01:29 -04:00
Fernando Sahmkow
2ef696c85a Shader_IR: Implement BRX tracking. 2019-10-25 09:01:29 -04:00
James Rowe
5ee4fb6e12 Merge pull request #3029 from jroweboy/revert
Revert "ci: Add build name to archive root folder"
2019-10-24 12:53:30 -06:00
James Rowe
969f0afa4e Revert "ci: Add build name to archive root folder"
This reverts commit 5e553a6c26.
2019-10-24 12:46:15 -06:00
Rodrigo Locatti
5062728669 Merge pull request #3028 from lioncash/constexpr
shader_bytecode: Make Matcher constexpr capable
2019-10-24 15:10:40 -03:00
Lioncash
7fdf991097 shader_bytecode: Make Matcher constexpr capable
Greatly shrinks the amount of generated code for GetDecodeTable().

Collapses an assembly output of 9000+ lines down to ~3621 with Clang,
and 6513 down to ~2616 with GCC, given it's now allowed to construct all
the entries as a sequence of constant data.
2019-10-24 01:10:10 -04:00
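A minimal sketch of the pattern this commit describes, with a simplified Matcher and made-up opcode encodings (the real type in shader_bytecode.h has more fields): once the matcher is a literal type, the decode table can be emitted as constant data.

#include <array>
#include <cstdint>
#include <string_view>

// Simplified, hypothetical stand-in for the real Matcher.
struct Matcher {
    std::string_view name;
    std::uint64_t mask;
    std::uint64_t expected;

    constexpr bool Matches(std::uint64_t instruction) const {
        return (instruction & mask) == expected;
    }
};

// With a constexpr-capable Matcher, the table becomes constant data instead of
// a sequence of runtime constructor calls. The encodings below are invented.
constexpr std::array<Matcher, 2> decode_table{{
    {"FADD", 0xFFF0'0000'0000'0000ULL, 0x5C58'0000'0000'0000ULL},
    {"FMUL", 0xFFF0'0000'0000'0000ULL, 0x5C68'0000'0000'0000ULL},
}};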
Lioncash
382717172e shader_ir: Use std::array with pair instead of unordered_map
Given the overall size of the maps are very small, we can use arrays of
pairs here instead of always heap allocating a new map every time the
functions are called. Given the small size of the maps, the difference
in container lookups are negligible, especially given the entries are
already sorted.
2019-10-24 00:25:38 -04:00
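A hedged sketch of the replacement described above, with invented names: a small, already-sorted array of pairs searched linearly, instead of building an unordered_map on every call.

#include <array>
#include <optional>
#include <string_view>
#include <utility>

// Hypothetical lookup in the style the commit describes; keys and values are
// illustrative only.
std::optional<std::string_view> GetTypeString(int type) {
    static constexpr std::array<std::pair<int, std::string_view>, 3> table{{
        {0, "float"},
        {1, "int"},
        {2, "uint"},
    }};
    for (const auto& [key, name] : table) {
        if (key == type) {
            return name;
        }
    }
    return std::nullopt;
}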
Rodrigo Locatti
5328d570df Merge pull request #3024 from lioncash/shadow
video_core/shader: Resolve instances of variable shadowing
2019-10-24 00:45:23 -03:00
Lioncash
1f5401c89c video_core/shader: Resolve instances of variable shadowing
Silences a few -Wshadow warnings.
2019-10-23 23:00:31 -04:00
Lioncash
611236c883 externals: Track upstream opus
Tracks upstream opus, allowing the library to be easily updated. While
we're at it, we incorporate the CMakeLists.txt so that we have easy
control over the requirements of the build.
2019-10-23 20:58:54 -04:00
bunnei
012d7f5233 Merge pull request #3022 from DarkLordZach/azure-folder-rename
ci: Add build name to archive root folder
2019-10-23 15:52:37 -04:00
Zach Hilman
5e553a6c26 ci: Add build name to archive root folder 2019-10-23 15:23:43 -04:00
bunnei
6fe89acf0d Merge pull request #2991 from lioncash/npad
hid/npad: Minor cleanup
2019-10-22 19:51:24 -04:00
Zach Hilman
bb207fe27a savedata_factory: Automatically create certain savedata
After further hardware investigation, it appears that some games, perhaps those more lazily coded, will not call EnsureSaveData, meaning that they expect the normal (current) save to be automatically made. Additionally, some games do not create a cache or temporary save before use.
In these 3 specific instances, the save is created automatically for the game if it doesn't exist.
2019-10-22 15:47:38 -04:00
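A hypothetical sketch of the behaviour described above; the paths and helper name are invented and do not match the real savedata_factory API.

#include <filesystem>
#include <string>

// If a game never called EnsureSaveData, create its save directory on first open.
std::filesystem::path OpenOrCreateSave(const std::filesystem::path& save_root,
                                       const std::string& title_id) {
    const auto dir = save_root / title_id;
    if (!std::filesystem::exists(dir)) {
        std::filesystem::create_directories(dir);
    }
    return dir;
}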
Fernando Sahmkow
c4a0aa9207 Merge pull request #2995 from ReinUsesLisp/ignore-gmem
shader_ir/memory: Ignore global memory when tracking fails
2019-10-22 13:22:43 -04:00
Fernando Sahmkow
7ecf9f7228 Merge pull request #2983 from lioncash/fallthrough
gl_shader_decompiler/vk_shader_decompiler: Resolve implicit fallthrough cases
2019-10-22 13:16:46 -04:00
Fernando Sahmkow
1509d2ffbd Shader_Ir: Fix TLD4S from using a component mask.
TLD4S always outputs 4 values; the previous code checked a component
mask and omitted those values that weren't part of it. This commit
corrects that and makes sure all 4 values are set.
2019-10-22 10:59:07 -04:00
ReinUsesLisp
1ea07954fb shader_ir/memory: Ignore global memory when tracking fails
When constant buffer tracking fails and we are blasting through asserts,
ignore the global memory operation instead of invoking undefined
behaviour.

In the case of LDG this means filling the destination registers with
zeroes; for STG this means ignore the instruction as a whole.

The default behaviour is still to abort execution on failure.
2019-10-22 02:49:17 -03:00
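A self-contained, hypothetical sketch of the fallback described above; the real shader_ir types and helpers differ.

#include <cstddef>
#include <optional>
#include <vector>

struct GlobalMemoryOp {
    bool is_load;               // LDG if true, STG if false
    std::size_t element_count;  // number of 32-bit values touched
};

void LowerGlobalMemory(const GlobalMemoryOp& op, std::optional<int> tracked_cbuf,
                       std::vector<int>& dest_regs) {
    if (!tracked_cbuf) {
        if (op.is_load) {
            dest_regs.assign(op.element_count, 0);  // LDG: fill destinations with zeroes
        }
        return;  // STG: emit nothing for the store
    }
    // ... normal lowering using *tracked_cbuf would go here ...
}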
David
1572fb9bf2 Merge pull request #3003 from ReinUsesLisp/flush-mme-trace
maxwell_3d: Reduce FlushMMEInlineDraw logging to Trace
2019-10-20 21:12:11 +11:00
ReinUsesLisp
e3107788e6 maxwell_3d: Reduce FlushMMEInlineDraw logging to Trace 2019-10-20 03:43:17 -03:00
bunnei
dd2e96b362 Merge pull request #3001 from bunnei/fix-clang-error
core: Fix clang-format errors.
2019-10-19 14:52:19 -04:00
bunnei
a86a88109a core: Fix clang-format errors. 2019-10-19 14:47:18 -04:00
bunnei
8d009a7aae Merge pull request #2998 from cociorbaandrei/fix_null_pderef
Fix null pointer dereference.
2019-10-18 10:50:01 -04:00
Nicolae-Andrei Cociorba
20521da259 Fix null pointer deref. 2019-10-18 14:12:12 +00:00
Rodrigo Locatti
dc5eedef71 Merge pull request #2994 from lioncash/fmt
video_core/shader/ast: Minor changes to ASTPrinter
2019-10-18 01:05:25 -03:00
Lioncash
074b38b7a9 video_core/shader/ast: Make ShowCurrentState() and SanityCheck() const member functions
These can also trivially be made const member functions, with the
addition of a few consts.
2019-10-17 20:59:48 -04:00
Lioncash
222f4b45eb video_core/shader/ast: Make ASTManager::Print a const member function
Given all visiting functions never modify the nodes, we can trivially
make this a const member function.
2019-10-17 20:56:39 -04:00
Rodrigo Locatti
fd922ddb01 Merge pull request #2993 from lioncash/vulkan-expr
vk_shader_decompiler: Mark operator() function parameters as const references
2019-10-17 21:46:49 -03:00
Lioncash
7831e86c34 video_core/shader/ast: Make ExprPrinter members private
This member already has an accessor, so there's no need for it to be
public.
2019-10-17 20:39:36 -04:00
Lioncash
a2eccbf075 video_core/shader/ast: Make Indent() return a string_view
The returned string is simply a substring of our constexpr tabs
string_view, so we can just use a string_view here as well, since the
original string_view is guaranteed to always exist.

Now the function is fully non-allocating.
2019-10-17 20:29:00 -04:00
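A hedged sketch of the non-allocating pattern described above; the class and member are simplified stand-ins for the real ASTPrinter.

#include <cstddef>
#include <string_view>

class Printer {
public:
    std::string_view Indent() const {
        // The string literal has static storage duration, so handing out a view
        // into it is safe and allocation-free.
        constexpr std::string_view tabs{"\t\t\t\t\t\t\t\t\t\t\t\t"};
        return tabs.substr(0, depth);
    }

private:
    std::size_t depth = 2;
};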
Lioncash
15d177a6ac video_core/shader/ast: Make Indent() private
It's never used outside of this class, so we can narrow its scope down.
2019-10-17 20:26:13 -04:00
Lioncash
7f6a8a33d4 video_core/shader/ast: Rename Ident() to Indent()
This can be confusing, given "ident" is generally used as a shorthand
for "identifier".
2019-10-17 20:26:13 -04:00
Lioncash
081530686c video_core/shader/ast: Make use of fmt where applicable
Makes a few strings nicer to read and also eliminates a bit of string
churn with operator+.
2019-10-17 20:26:10 -04:00
bunnei
d1713ad451 Merge pull request #2992 from lioncash/dmnt
dmnt_cheat_vm: Correct register Restore and ClearRegs behavior
2019-10-17 19:52:24 -04:00
Lioncash
c6bec9aa10 vk_shader_decompiler: Mark operator() function parameters as const references
These parameters aren't actually modified in any way, so they can be
made const references.
2019-10-17 19:44:00 -04:00
Lioncash
2c34d8aabb dmnt_cheat_vm: Correct register Restore and ClearRegs behavior
Previously these were performing the same behavior as the Save and
ClearSaved opcode types.
2019-10-17 18:49:39 -04:00
Rodrigo Locatti
219fdcb9d9 Merge pull request #2966 from FernandoS27/astc-formats
Implement a series of ASTC formats and R4G4B4A4 format
2019-10-17 19:24:11 -03:00
Rodrigo Locatti
a21b88ef8f Merge pull request #2979 from lioncash/macro
video_core/macro_interpreter: Make definitions of most private enums/unions hidden
2019-10-17 19:21:09 -03:00
Rodrigo Locatti
7cf091bb20 Merge pull request #2989 from lioncash/apm
service/apm/controller: Minor interface changes
2019-10-17 19:20:14 -03:00
Lioncash
8d8e495248 hid/npad: Fix incorrect connection boolean value in ConnectAllDisconnectedControllers()
We should be setting the connection state to true, otherwise we aren't
actually making the controllers connected like the function name
indicates.
2019-10-17 18:19:47 -04:00
Rodrigo Locatti
7398466c33 Merge pull request #2990 from lioncash/warn
core: Resolve -Wreorder warnings
2019-10-17 19:18:52 -03:00
Lioncash
d076466f26 hid/npad: Add missing break in default case
While not an issue, it does prevent fallthrough from occurring if
anything is ever added after this case (unlikely to occur, but this
turns a trivial "should not cause issues" into a definite "won't cause
issues").
2019-10-17 18:17:42 -04:00
Lioncash
26c84718c8 hid/npad: Replace std::for_each with ranged for loops
Performs the same behavior, but is built into the core language itself.

No functional change.
2019-10-17 18:16:36 -04:00
Lioncash
e433e99191 hid/npad: Remove redundant non-const variant of IsControllerSupported()
The const qualified variant can also be called in non-const contexts, so
we can remove the non-const variant to eliminate a bit of code
duplication.
2019-10-17 18:11:41 -04:00
Lioncash
a71e8066a1 hid/npad: Move function declarations
Clearly separate these from the variable declarations to make them more
visible.
2019-10-17 18:09:08 -04:00
Lioncash
125caf5d6e video_core/macro_interpreter: Make definitions of most private enums/unions hidden
This allows the implementation of these types to change without
requiring a rebuild of everything that includes the macro interpreter
header.
2019-10-17 17:55:46 -04:00
Lioncash
98c6a95079 core/core: Resolve -Wreorder warnings
Amends the initializer lists to be ordered in the same manner that
they're declared within the class.
2019-10-17 17:51:06 -04:00
Lioncash
6414d9e2e5 core/memory/cheat_engine: Resolve -Wreorder warnings
Amends the initializer lists to be ordered in the same manner that
they're declared within the class.
2019-10-17 16:21:47 -04:00
Lioncash
b77430df70 apm/controller: Make SetPerformanceConfiguration() use an array of pairs over a map
While a map is an OK way to do lookups (and usually recommended in most
cases), this is a map that lives for the entire duration of the program
and only deallocates its contents when the program terminates.

Given the total size of the map is quite small, we can simply use a
std::array of pairs and utilize std::find_if to perform the same
behavior without loss of performance.

This eliminates a static constructor and places the data into the
read-only segment.

While we're at it, we can also handle malformed inputs instead of
directly dereferencing the resulting iterator.
2019-10-17 16:13:14 -04:00
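A hypothetical sketch of the lookup style described above; the enum values and the boolean return are illustrative, not the real apm/controller interface.

#include <algorithm>
#include <array>
#include <cstdint>
#include <utility>

enum class PerformanceConfiguration : std::uint32_t { Config7 = 7, Config8 = 8 };
enum class PerformanceMode : std::uint32_t { Handheld = 0, Docked = 1 };

// Lives in the read-only segment; no static constructor is required.
constexpr std::array<std::pair<PerformanceConfiguration, PerformanceMode>, 2> config_to_mode{{
    {PerformanceConfiguration::Config7, PerformanceMode::Handheld},
    {PerformanceConfiguration::Config8, PerformanceMode::Docked},
}};

bool SetPerformanceConfiguration(PerformanceConfiguration config) {
    const auto it = std::find_if(config_to_mode.cbegin(), config_to_mode.cend(),
                                 [config](const auto& entry) { return entry.first == config; });
    if (it == config_to_mode.cend()) {
        return false;  // malformed input: don't dereference end()
    }
    // ... apply it->second ...
    return true;
}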
Lioncash
141d929929 apm/controller: Make GetCurrentPerformanceMode() a const member function
This doesn't modify instance state, so it can be made const qualified.
2019-10-17 15:58:25 -04:00
bunnei
9fe8072c67 Merge pull request #2980 from lioncash/warn
maxwell_3d: Silence truncation warnings
2019-10-17 14:02:16 -04:00
Rodrigo Locatti
60c602e4e7 Merge pull request #2978 from lioncash/doxygen
video_core/texture_cache: Amend Doxygen references
2019-10-16 22:09:40 -03:00
Rodrigo Locatti
e00b529a89 Merge pull request #2982 from lioncash/surface
texture_cache: Avoid unnecessary surface copies within PickStrategy() and TryReconstructSurface()
2019-10-16 19:43:32 -03:00
bunnei
ef9b31783d Merge pull request #2912 from FernandoS27/async-fixes
General fixes to Async GPU
2019-10-16 10:34:48 -04:00
Rodrigo Locatti
60315060b1 Merge pull request #2984 from lioncash/fallthrough2
video_core/surface: Add missing break in PixelFormatFromTextureFormat()
2019-10-15 23:08:34 -03:00
Lioncash
cf9e13c255 video_core/surface: Add missing break in PixelFormatFromTextureFormat()
Prevents fallthrough into the following case.
2019-10-15 21:53:15 -04:00
Rodrigo Locatti
14f3cebcd4 Merge pull request #2981 from lioncash/copy
gl_shader_decompiler: Minor cleanup-related changes
2019-10-15 21:07:25 -03:00
Lioncash
6947bf8e44 vk_shader_decompiler: Resolve fallthrough within ExprDecompiler's ExprCondCode operator()
This would previously result in NeverExecute and UnusedIndex being
treated as regular predicates.
2019-10-15 19:40:58 -04:00
Lioncash
b42a74ff2c gl_shader_decompiler: Resolve fallthrough within ExprDecompiler's ExprCondCode operator()
This would previously result in NeverExecute and UnusedIndex being
treated as regular predicates.
2019-10-15 19:38:55 -04:00
Lioncash
a24e8bf9cf texture_cache: Avoid unnecessary surface copies within PickStrategy() and TryReconstructSurface()
We can take these by const reference and avoid making unnecessary
copies, preventing some atomic reference count increments and
decrements.
2019-10-15 19:31:33 -04:00
Lioncash
77b4916b33 control_flow: Silence truncation warnings
This can be trivially fixed by making the input size a size_t.
CFGRebuildState's constructor parameter is already a std::size_t, so
this just makes the size type fully conform with it.
2019-10-15 19:10:28 -04:00
Lioncash
4f16ce9294 gl_shader_decompiler: Make ExprDecompiler's GetResult() a const member function
This is only ever used to read, but not write, the resulting string, so
we can enforce this by making it a const member function.
2019-10-15 19:02:59 -04:00
Lioncash
67df3f7742 gl_shader_decompiler: Use a std::string_view with GetDeclarationWithSuffix()
This allows the function to be completely non-allocating for inputs of
all sizes (i.e. there's no heap cost for an input to convert to a
std::string_view).
2019-10-15 19:00:48 -04:00
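A hedged sketch of the signature change described above, assuming fmt is available (as it is elsewhere in the codebase); the body of the real helper may differ.

#include <string>
#include <string_view>

#include <fmt/format.h>

// Taking std::string_view lets string literals and std::string arguments alike
// convert without a heap allocation at the call site.
std::string GetDeclarationWithSuffix(unsigned index, std::string_view name) {
    return fmt::format("{}_{}", name, index);
}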
Lioncash
04a1161354 gl_shader_decompiler: Fold flow_var constant into GetFlowVariable()
This is only ever used within this function, so we can narrow its scope
down.
2019-10-15 18:58:36 -04:00
Lioncash
2f2ab9b5bc gl_shader_decompiler: Mark ASTDecompiler/ExprDecompiler parameters as const references where applicable
These member functions don't actually modify the input parameter, so we
can make this explicit with the use of const.
2019-10-15 18:57:02 -04:00
Lioncash
b8a62adcf1 gl_shader_decompiler: Pass by reference to GenerateTextureArgument()
Avoids an unnecessary atomic reference count increment and decrement.
2019-10-15 18:29:37 -04:00
Lioncash
d1d7ce74d2 gl_shader_decompiler: Use std::holds_alternative within GenerateTexture()
This only ever queries if the type exists within the variant, but
doesn't actually do anything with the return value. We can just use
std::holds_alternative for this use case.
2019-10-15 18:25:48 -04:00
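An illustrative example of the std::holds_alternative usage described above; the meta types here are simplified, hypothetical stand-ins.

#include <string>
#include <variant>

struct MetaTexture { int sampler = 0; };
struct MetaImage { int image = 0; };
using Meta = std::variant<MetaTexture, MetaImage>;

std::string TypeSuffix(const Meta& meta) {
    // Answers "does the variant hold this type?" directly, instead of calling
    // std::get_if and discarding the returned pointer.
    return std::holds_alternative<MetaTexture>(meta) ? "_tex" : "_img";
}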
Lioncash
67658dd6e8 shader/node: std::move Meta instance within OperationNode constructor
Allows usages of the constructor to avoid an unnecessary copy.
2019-10-15 18:21:59 -04:00
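A minimal sketch of the take-by-value-then-move pattern the commit describes; the real OperationNode has more parameters and members.

#include <utility>
#include <vector>

struct Meta { std::vector<int> extra_args; };

class OperationNode {
public:
    explicit OperationNode(Meta meta) : meta_{std::move(meta)} {}

private:
    Meta meta_;
};

// A caller passing an rvalue now avoids copying the vector inside Meta:
//   OperationNode node{Meta{{1, 2, 3}}};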
Lioncash
9760795bfb gl_shader_decompiler: Avoid unnecessary copies of MetaImage
MetaImage contains a std::vector, so copying here could result in
unnecessary reallocations. Given the operation lives throughout the
entire scope, this is safe to do.
2019-10-15 18:14:55 -04:00
Lioncash
c9c75f9587 maxwell_3d: Silence truncation warnings
A trivial warning caused by using u32 instead of size_t for the argument
types.
2019-10-15 17:51:35 -04:00
bunnei
2299950de1 Merge pull request #2972 from lioncash/system
{bcat, gpu, nvflinger}: Remove trivial usages of the global system accessor
2019-10-15 17:49:12 -04:00
bunnei
ba0086e32d Merge pull request #2977 from lioncash/algorithm
common: Rename binary_find.h to algorithm.h
2019-10-15 16:41:52 -04:00
Lioncash
b25b94400e video_core/gpu: Remove use of the global system accessor
We can just make use of the reference member variable instead of
accessing the global system instance.
2019-10-15 16:39:30 -04:00
Lioncash
cc1d7048b5 bcat: Remove use of global system accessors
Removes all uses of the global system accessor within the BCAT
interface.
2019-10-15 16:39:27 -04:00
Lioncash
524eb15513 video_core/texture_cache: Amend Doxygen references
Amends the doxygen comments so that they properly resolve. While we're
at it, we can correct some typos and fix up some of the comments'
formatting in order to make them slightly nicer to read.
2019-10-15 15:40:00 -04:00
Lioncash
d5706346d7 common/algorithm: Add description comment indicating intended algorithms
Makes it explicit that the header is intended for iterator-based
algorithms that can ideally operate on any type.
2019-10-15 15:25:23 -04:00
Lioncash
ac4dbd3b25 common: Rename binary_find.h to algorithm.h
Makes the header more general for other potential algorithms in the
future. While we're at it, include a missing <functional> include to
satisfy the use of std::less.
2019-10-15 15:24:50 -04:00
Fernando Sahmkow
64e652d8cb Kernel Thread: Cleanup THREADPROCESSORID_DONT_UPDATE. 2019-10-15 11:55:30 -04:00
Fernando Sahmkow
e28c7f5217 Kernel: Address Feedback 2 2019-10-15 11:55:28 -04:00
Fernando Sahmkow
a3524879be Kernel: Clang Format 2019-10-15 11:55:27 -04:00
Fernando Sahmkow
c32520ceb7 Kernel: Reverse global accessor removal. 2019-10-15 11:55:26 -04:00
Fernando Sahmkow
3073615dbc Kernel: Address Feedback. 2019-10-15 11:55:25 -04:00
Fernando Sahmkow
25f8606a6d Kernel Scheduler: Make sure the global scheduler shutdowns correctly. 2019-10-15 11:55:24 -04:00
Fernando Sahmkow
b3c1deba49 Kernel_Thread: Eliminate most global accessors. 2019-10-15 11:55:23 -04:00
Fernando Sahmkow
0b72b34d89 KernelSVC: Assert that condition variable address is aligned to 4 bytes. 2019-10-15 11:55:22 -04:00
Fernando Sahmkow
96b1b144af Kernel: Correct Paused scheduling 2019-10-15 11:55:21 -04:00
Fernando Sahmkow
1c6a11ab14 Kernel: Corrections to Wait Objects clearing in which a thread could still be signalled after a timeout or a cancel. 2019-10-15 11:55:20 -04:00
Fernando Sahmkow
27d571c084 Kernel: Correct redundant yields to only advance time forward. 2019-10-15 11:55:20 -04:00
Fernando Sahmkow
7176857177 Kernel: Corrections to ModifyByWaitingCountAndSignalToAddressIfEqual 2019-10-15 11:55:19 -04:00
Fernando Sahmkow
44e09e5f21 Kernel: Correct Results in Condition Variables and Mutexes 2019-10-15 11:55:18 -04:00
Fernando Sahmkow
1ec1e81373 Kernel: Clang Format 2019-10-15 11:55:17 -04:00
Fernando Sahmkow
e05a8c2385 Kernel: Remove global system accessor from WaitObject 2019-10-15 11:55:16 -04:00
Fernando Sahmkow
0cf26cee59 Scheduler: Implement Yield Count and Core migration on Thread Preemption. 2019-10-15 11:55:16 -04:00
Fernando Sahmkow
2d382de6fa Scheduler: Corrections to YieldAndBalanceLoad and Yield bombing protection. 2019-10-15 11:55:15 -04:00
Fernando Sahmkow
b49c0dab87 Kernel: Initial implementation of thread preemption. 2019-10-15 11:55:14 -04:00
Fernando Sahmkow
103f3a2fe5 Scheduler: Add protections for Yield bombing
In case of redundant yields, the scheduler will now idle the core for
its timeslice, in order to avoid continuously yielding the same thing
over and over.
2019-10-15 11:55:13 -04:00
Fernando Sahmkow
82218c925a Kernel: Style and Corrections 2019-10-15 11:55:12 -04:00
Fernando Sahmkow
fcc6b34fff Correct PrepareReschedule 2019-10-15 11:55:12 -04:00
Fernando Sahmkow
3a94e7ea33 Comment and reorganize the scheduler 2019-10-15 11:55:11 -04:00
Fernando Sahmkow
b5d1e44782 Add PrepareReschedule where required. 2019-10-15 11:55:10 -04:00
Fernando Sahmkow
b8b7ebcece Correct compiling errors and adapt to the new interface. 2019-10-15 11:55:09 -04:00
Fernando Sahmkow
9031502974 Correct Supervisor Calls to work with the new scheduler. 2019-10-15 11:55:08 -04:00
Fernando Sahmkow
47c6c78c03 Redesign CPU Cores to work with the new scheduler 2019-10-15 11:55:07 -04:00
Fernando Sahmkow
57a71f899a Add interfacing to the Global Scheduler 2019-10-15 11:55:07 -04:00
Fernando Sahmkow
a1ac0c6cb4 Adapt thread class to the new Scheduler 2019-10-15 11:55:06 -04:00
Fernando Sahmkow
b164d8ee53 Implement a new Core Scheduler 2019-10-15 11:55:04 -04:00
bunnei
cab2619aeb Merge pull request #2965 from FernandoS27/fair-core-timing
Core Timing: Rework Core Timing to run all cores evenly.
2019-10-15 11:48:30 -04:00
bunnei
0378babd15 Merge pull request #2897 from DarkLordZach/oss-ext-fonts-1
pl_u: Move open source font archives and fix NAND error
2019-10-14 15:13:41 -04:00
bunnei
c274fd588d Merge pull request #2968 from FreddyFunk/fix-zl-zr-analog-triggers
yuzu/configure_input_player: Fix input handling for ZL and ZR from controllers with analog triggers
2019-10-14 13:04:49 -04:00
bunnei
cd2efed922 Merge pull request #2930 from DarkLordZach/gamecard-partitions
file_sys: Add code to access raw gamecard partitions and lazily load them
2019-10-14 10:29:10 -04:00
Zach Hilman
e0b9ee9b94 card_image: Implement system update commands in XCI 2019-10-13 14:18:45 -04:00
Zach Hilman
1911f85391 pl_u: Fix mismatched rebase size error in font encryption 2019-10-13 13:46:27 -04:00
Zach Hilman
36d829c27b pl_u: Use kernel physical memory 2019-10-13 13:46:27 -04:00
Zach Hilman
b3a8a094a5 pl_u: Remove excess static qualifier 2019-10-13 13:46:27 -04:00
Zach Hilman
40284c6868 pl_u: Use OSS system archives if real archives don't exist 2019-10-13 13:46:27 -04:00
Zach Hilman
920742d418 system_archive: Synthesize shared fonts system archives 2019-10-13 13:46:10 -04:00
Zach Hilman
d6d6a87bde externals: Move OSS font data to file_sys in core 2019-10-13 13:46:10 -04:00
Lioncash
574440d59f nvflinger/buffer_queue: Remove use of a global system accessor 2019-10-12 09:17:56 -04:00
Fernando Sahmkow
a4ae11d63e Core_Timing: Address Remaining feedback. 2019-10-12 07:26:38 -04:00
Fernando Sahmkow
91f6333e23 Core_Timing: Fix tests. 2019-10-12 07:23:08 -04:00
Fernando Sahmkow
e0650a2034 Core_Timing: Address Feedback and suppress warnings. 2019-10-11 14:44:14 -04:00
Fernando Sahmkow
cfc2f30dc4 AsyncGpu: Address Feedback 2019-10-11 13:41:15 -04:00
FreddyFunk
be5dc68ca3 fixed clang format & addressed feedback 2019-10-10 23:27:00 +02:00
FreddyFunk
a31c6b2c4e yuzu/configure_input_player: Fix input handling for ZL and ZR from controllers with analog triggers 2019-10-10 23:12:06 +02:00
Fernando Sahmkow
f32a49d3d8 Surfaces: Implement R4G4B4A4U format. 2019-10-09 12:57:02 -04:00
Fernando Sahmkow
b9ddb517b1 Surfaces: Implement ASTC 6x6 10x10 12x12 8x6 6x5 2019-10-09 12:44:31 -04:00
Fernando Sahmkow
96f2b16356 Core Timing: Correct Idle and remove leftover pragma 2019-10-09 12:30:33 -04:00
Fernando Sahmkow
65aff6930b Core Timing: General corrections and added tests. 2019-10-09 12:30:33 -04:00
Fernando Sahmkow
c9a1129c95 Tests: Eliminate old Core Timing Tests 2019-10-09 12:30:32 -04:00
Fernando Sahmkow
555866f8dc Core Timing: Rework Core Timing to run all cores evenly. 2019-10-09 12:30:31 -04:00
Fernando Sahmkow
538f5880ff GL_Renderer: Remove leftover snippet. 2019-10-04 19:59:55 -04:00
Fernando Sahmkow
75395605d6 NvFlinger: Remove leftover from corrections and clang format. 2019-10-04 19:59:54 -04:00
Fernando Sahmkow
9f2719d1a4 Gl_Rasterizer: Protect CPU Memory mapping from multiple threads. 2019-10-04 19:59:53 -04:00
Fernando Sahmkow
3f104464de Core: Wait for GPU to be idle before shutting down. 2019-10-04 19:59:53 -04:00
Fernando Sahmkow
69fa2e6525 Nvdrv: Correct Event setup in Nvdrv
Events are supposed to be cleared on querying. This fixes that issue.
2019-10-04 19:59:52 -04:00
Fernando Sahmkow
782b7a0ca4 NVFlinger: Reverse the change that only signaled events on buffer acquire.
This has been hardware tested and it seems that NVFlinger will still 
signal even if there are no buffers to present.
2019-10-04 19:59:51 -04:00
Fernando Sahmkow
ffc2ce89a0 Nvdrv: Do framelimiting only in the CPU Thread 2019-10-04 19:59:50 -04:00
Fernando Sahmkow
976d9ef43c NvFlinger: Don't swap buffers if a frame is missing and always trigger event in sync gpu. 2019-10-04 19:59:49 -04:00
Fernando Sahmkow
5b5e60ffec GPU_Async: Correct fences, display events and more.
This commit uses guest fences on the vSync event instead of the artificial
fake fence we had.
It also keeps signaling display events while the game is loading, as the OS
is supposed to send buffers to vSync during that time.
2019-10-04 19:59:48 -04:00
Fernando Sahmkow
4e9f975935 Nvdrv: Correct Async regression and avoid signaling empty buffer vsyncs 2019-10-04 19:59:47 -04:00
Zach Hilman
c4f3400bea card_image: Add accessors for raw partitions in XCI 2019-09-22 21:51:46 -04:00
Zach Hilman
3952c73aee card_image: Lazily load partitions in XCI 2019-09-22 21:50:29 -04:00
Zach Hilman
3895f7e456 pfs: Provide accessors for file sizes and offsets 2019-09-22 21:44:36 -04:00
201 changed files with 78916 additions and 114739 deletions

.gitmodules (vendored): 8 changed lines

@@ -26,11 +26,11 @@
path = externals/mbedtls
url = https://github.com/DarkLordZach/mbedtls
[submodule "opus"]
path = externals/opus
url = https://github.com/ogniK5377/opus.git
path = externals/opus/opus
url = https://github.com/xiph/opus.git
[submodule "soundtouch"]
path = externals/soundtouch
url = https://github.com/citra-emu/ext-soundtouch.git
path = externals/soundtouch
url = https://github.com/citra-emu/ext-soundtouch.git
[submodule "libressl"]
path = externals/libressl
url = https://github.com/citra-emu/ext-libressl-portable.git


@@ -85,10 +85,12 @@ set(HASH_FILES
"${VIDEO_CORE}/shader/decode/xmad.cpp"
"${VIDEO_CORE}/shader/ast.cpp"
"${VIDEO_CORE}/shader/ast.h"
"${VIDEO_CORE}/shader/control_flow.cpp"
"${VIDEO_CORE}/shader/control_flow.h"
"${VIDEO_CORE}/shader/compiler_settings.cpp"
"${VIDEO_CORE}/shader/compiler_settings.h"
"${VIDEO_CORE}/shader/const_buffer_locker.cpp"
"${VIDEO_CORE}/shader/const_buffer_locker.h"
"${VIDEO_CORE}/shader/control_flow.cpp"
"${VIDEO_CORE}/shader/control_flow.h"
"${VIDEO_CORE}/shader/decode.cpp"
"${VIDEO_CORE}/shader/expr.cpp"
"${VIDEO_CORE}/shader/expr.h"


@@ -42,9 +42,6 @@ target_include_directories(mbedtls PUBLIC ./mbedtls/include)
add_library(microprofile INTERFACE)
target_include_directories(microprofile INTERFACE ./microprofile)
# Open Source Archives
add_subdirectory(open_source_archives EXCLUDE_FROM_ALL)
# Unicorn
add_library(unicorn-headers INTERFACE)
target_include_directories(unicorn-headers INTERFACE ./unicorn/include)


@@ -1,16 +0,0 @@
add_library(open_source_archives
src/FontChineseSimplified.cpp
src/FontChineseTraditional.cpp
src/FontExtendedChineseSimplified.cpp
src/FontKorean.cpp
src/FontNintendoExtended.cpp
src/FontStandard.cpp
include/FontChineseSimplified.h
include/FontChineseTraditional.h
include/FontExtendedChineseSimplified.h
include/FontKorean.h
include/FontNintendoExtended.h
include/FontStandard.h
)
target_include_directories(open_source_archives PUBLIC include)


@@ -1,4 +0,0 @@
These files were generated by https://github.com/FearlessTobi/yuzu_system_archives at git commit 0a24b0c9f38d71fb2c4bba5645a39029e539a5ec. To generate the files use the run.sh inside that repository.
The follwing system archives are currently included:
- JPN/EUR/USA System Font


@@ -1,6 +0,0 @@
#pragma once
#include <array>
extern const std::array<unsigned char, 217276> FontChineseSimplified;


@@ -1,6 +0,0 @@
#pragma once
#include <array>
extern const std::array<unsigned char, 222236> FontChineseTraditional;


@@ -1,6 +0,0 @@
#pragma once
#include <array>
extern const std::array<unsigned char, 293516> FontExtendedChineseSimplified;


@@ -1,6 +0,0 @@
#pragma once
#include <array>
extern const std::array<unsigned char, 217276> FontKorean;


@@ -1,6 +0,0 @@
#pragma once
#include <array>
extern const std::array<unsigned char, 172064> FontNintendoExtended;


@@ -1,6 +0,0 @@
#pragma once
#include <array>
extern const std::array<unsigned char, 217276> FontStandard;

Six file diffs suppressed because they are too large.

externals/opus (vendored): 1 changed line

Submodule externals/opus deleted from 562f8ba555

externals/opus/CMakeLists.txt (vendored, new file): 250 changed lines

@@ -0,0 +1,250 @@
cmake_minimum_required(VERSION 3.8)
project(opus)
option(OPUS_STACK_PROTECTOR "Use stack protection" OFF)
option(OPUS_USE_ALLOCA "Use alloca for stack arrays (on non-C99 compilers)" OFF)
option(OPUS_CUSTOM_MODES "Enable non-Opus modes, e.g. 44.1 kHz & 2^n frames" OFF)
option(OPUS_FIXED_POINT "Compile as fixed-point (for machines without a fast enough FPU)" OFF)
option(OPUS_ENABLE_FLOAT_API "Compile with the floating point API (for machines with float library" ON)
include(opus/opus_functions.cmake)
if(OPUS_STACK_PROTECTOR)
if(NOT MSVC) # GC on by default on MSVC
check_and_set_flag(STACK_PROTECTION_STRONG -fstack-protector-strong)
endif()
else()
if(MSVC)
check_and_set_flag(BUFFER_SECURITY_CHECK /GS-)
endif()
endif()
add_library(opus STATIC
# CELT sources
opus/celt/bands.c
opus/celt/celt.c
opus/celt/celt_decoder.c
opus/celt/celt_encoder.c
opus/celt/celt_lpc.c
opus/celt/cwrs.c
opus/celt/entcode.c
opus/celt/entdec.c
opus/celt/entenc.c
opus/celt/kiss_fft.c
opus/celt/laplace.c
opus/celt/mathops.c
opus/celt/mdct.c
opus/celt/modes.c
opus/celt/pitch.c
opus/celt/quant_bands.c
opus/celt/rate.c
opus/celt/vq.c
# SILK sources
opus/silk/A2NLSF.c
opus/silk/CNG.c
opus/silk/HP_variable_cutoff.c
opus/silk/LPC_analysis_filter.c
opus/silk/LPC_fit.c
opus/silk/LPC_inv_pred_gain.c
opus/silk/LP_variable_cutoff.c
opus/silk/NLSF2A.c
opus/silk/NLSF_VQ.c
opus/silk/NLSF_VQ_weights_laroia.c
opus/silk/NLSF_decode.c
opus/silk/NLSF_del_dec_quant.c
opus/silk/NLSF_encode.c
opus/silk/NLSF_stabilize.c
opus/silk/NLSF_unpack.c
opus/silk/NSQ.c
opus/silk/NSQ_del_dec.c
opus/silk/PLC.c
opus/silk/VAD.c
opus/silk/VQ_WMat_EC.c
opus/silk/ana_filt_bank_1.c
opus/silk/biquad_alt.c
opus/silk/bwexpander.c
opus/silk/bwexpander_32.c
opus/silk/check_control_input.c
opus/silk/code_signs.c
opus/silk/control_SNR.c
opus/silk/control_audio_bandwidth.c
opus/silk/control_codec.c
opus/silk/dec_API.c
opus/silk/decode_core.c
opus/silk/decode_frame.c
opus/silk/decode_indices.c
opus/silk/decode_parameters.c
opus/silk/decode_pitch.c
opus/silk/decode_pulses.c
opus/silk/decoder_set_fs.c
opus/silk/enc_API.c
opus/silk/encode_indices.c
opus/silk/encode_pulses.c
opus/silk/gain_quant.c
opus/silk/init_decoder.c
opus/silk/init_encoder.c
opus/silk/inner_prod_aligned.c
opus/silk/interpolate.c
opus/silk/lin2log.c
opus/silk/log2lin.c
opus/silk/pitch_est_tables.c
opus/silk/process_NLSFs.c
opus/silk/quant_LTP_gains.c
opus/silk/resampler.c
opus/silk/resampler_down2.c
opus/silk/resampler_down2_3.c
opus/silk/resampler_private_AR2.c
opus/silk/resampler_private_IIR_FIR.c
opus/silk/resampler_private_down_FIR.c
opus/silk/resampler_private_up2_HQ.c
opus/silk/resampler_rom.c
opus/silk/shell_coder.c
opus/silk/sigm_Q15.c
opus/silk/sort.c
opus/silk/stereo_LR_to_MS.c
opus/silk/stereo_MS_to_LR.c
opus/silk/stereo_decode_pred.c
opus/silk/stereo_encode_pred.c
opus/silk/stereo_find_predictor.c
opus/silk/stereo_quant_pred.c
opus/silk/sum_sqr_shift.c
opus/silk/table_LSF_cos.c
opus/silk/tables_LTP.c
opus/silk/tables_NLSF_CB_NB_MB.c
opus/silk/tables_NLSF_CB_WB.c
opus/silk/tables_gain.c
opus/silk/tables_other.c
opus/silk/tables_pitch_lag.c
opus/silk/tables_pulses_per_block.c
# Opus sources
opus/src/analysis.c
opus/src/mapping_matrix.c
opus/src/mlp.c
opus/src/mlp_data.c
opus/src/opus.c
opus/src/opus_decoder.c
opus/src/opus_encoder.c
opus/src/opus_multistream.c
opus/src/opus_multistream_decoder.c
opus/src/opus_multistream_encoder.c
opus/src/opus_projection_decoder.c
opus/src/opus_projection_encoder.c
opus/src/repacketizer.c
)
if (DEBUG)
target_sources(opus PRIVATE opus/silk/debug.c)
endif()
if (OPUS_FIXED_POINT)
target_sources(opus PRIVATE
opus/silk/fixed/LTP_analysis_filter_FIX.c
opus/silk/fixed/LTP_scale_ctrl_FIX.c
opus/silk/fixed/apply_sine_window_FIX.c
opus/silk/fixed/autocorr_FIX.c
opus/silk/fixed/burg_modified_FIX.c
opus/silk/fixed/corrMatrix_FIX.c
opus/silk/fixed/encode_frame_FIX.c
opus/silk/fixed/find_LPC_FIX.c
opus/silk/fixed/find_LTP_FIX.c
opus/silk/fixed/find_pitch_lags_FIX.c
opus/silk/fixed/find_pred_coefs_FIX.c
opus/silk/fixed/k2a_FIX.c
opus/silk/fixed/k2a_Q16_FIX.c
opus/silk/fixed/noise_shape_analysis_FIX.c
opus/silk/fixed/pitch_analysis_core_FIX.c
opus/silk/fixed/prefilter_FIX.c
opus/silk/fixed/process_gains_FIX.c
opus/silk/fixed/regularize_correlations_FIX.c
opus/silk/fixed/residual_energy16_FIX.c
opus/silk/fixed/residual_energy_FIX.c
opus/silk/fixed/schur64_FIX.c
opus/silk/fixed/schur_FIX.c
opus/silk/fixed/solve_LS_FIX.c
opus/silk/fixed/vector_ops_FIX.c
opus/silk/fixed/warped_autocorrelation_FIX.c
)
else()
target_sources(opus PRIVATE
opus/silk/float/LPC_analysis_filter_FLP.c
opus/silk/float/LPC_inv_pred_gain_FLP.c
opus/silk/float/LTP_analysis_filter_FLP.c
opus/silk/float/LTP_scale_ctrl_FLP.c
opus/silk/float/apply_sine_window_FLP.c
opus/silk/float/autocorrelation_FLP.c
opus/silk/float/burg_modified_FLP.c
opus/silk/float/bwexpander_FLP.c
opus/silk/float/corrMatrix_FLP.c
opus/silk/float/encode_frame_FLP.c
opus/silk/float/energy_FLP.c
opus/silk/float/find_LPC_FLP.c
opus/silk/float/find_LTP_FLP.c
opus/silk/float/find_pitch_lags_FLP.c
opus/silk/float/find_pred_coefs_FLP.c
opus/silk/float/inner_product_FLP.c
opus/silk/float/k2a_FLP.c
opus/silk/float/noise_shape_analysis_FLP.c
opus/silk/float/pitch_analysis_core_FLP.c
opus/silk/float/process_gains_FLP.c
opus/silk/float/regularize_correlations_FLP.c
opus/silk/float/residual_energy_FLP.c
opus/silk/float/scale_copy_vector_FLP.c
opus/silk/float/scale_vector_FLP.c
opus/silk/float/schur_FLP.c
opus/silk/float/sort_FLP.c
opus/silk/float/warped_autocorrelation_FLP.c
opus/silk/float/wrappers_FLP.c
)
endif()
target_compile_definitions(opus PRIVATE OPUS_BUILD ENABLE_HARDENING)
if(NOT MSVC)
target_compile_definitions(opus PRIVATE _FORTIFY_SOURCE=2)
endif()
# It is strongly recommended to uncomment one of these VAR_ARRAYS: Use C99
# variable-length arrays for stack allocation USE_ALLOCA: Use alloca() for stack
# allocation If none is defined, then the fallback is a non-threadsafe global
# array
if(OPUS_USE_ALLOCA OR MSVC)
target_compile_definitions(opus PRIVATE USE_ALLOCA)
else()
target_compile_definitions(opus PRIVATE VAR_ARRAYS)
endif()
if(OPUS_CUSTOM_MODES)
target_compile_definitions(opus PRIVATE CUSTOM_MODES)
endif()
if(NOT OPUS_ENABLE_FLOAT_API)
target_compile_definitions(opus PRIVATE DISABLE_FLOAT_API)
endif()
target_compile_definitions(opus
PUBLIC
-DOPUS_VERSION="\\"1.3.1\\""
PRIVATE
# Use C99 intrinsics to speed up float-to-int conversion
HAVE_LRINTF
)
if (FIXED_POINT)
target_compile_definitions(opus PRIVATE -DFIXED_POINT=1 -DDISABLE_FLOAT_API)
endif()
target_include_directories(opus
PUBLIC
opus/include
PRIVATE
opus/celt
opus/silk
opus/silk/fixed
opus/silk/float
opus/src
)

externals/opus/opus (vendored submodule): 1 changed line

Submodule externals/opus/opus added at ad8fe90db7


@@ -74,10 +74,12 @@ add_custom_command(OUTPUT scm_rev.cpp
"${VIDEO_CORE}/shader/decode/xmad.cpp"
"${VIDEO_CORE}/shader/ast.cpp"
"${VIDEO_CORE}/shader/ast.h"
"${VIDEO_CORE}/shader/control_flow.cpp"
"${VIDEO_CORE}/shader/control_flow.h"
"${VIDEO_CORE}/shader/compiler_settings.cpp"
"${VIDEO_CORE}/shader/compiler_settings.h"
"${VIDEO_CORE}/shader/const_buffer_locker.cpp"
"${VIDEO_CORE}/shader/const_buffer_locker.h"
"${VIDEO_CORE}/shader/control_flow.cpp"
"${VIDEO_CORE}/shader/control_flow.h"
"${VIDEO_CORE}/shader/decode.cpp"
"${VIDEO_CORE}/shader/expr.cpp"
"${VIDEO_CORE}/shader/expr.h"
@@ -95,11 +97,11 @@ add_custom_command(OUTPUT scm_rev.cpp
)
add_library(common STATIC
algorithm.h
alignment.h
assert.h
detached_tasks.cpp
detached_tasks.h
binary_find.h
bit_field.h
bit_util.h
cityhash.cpp


@@ -5,6 +5,12 @@
#pragma once
#include <algorithm>
#include <functional>
// Algorithms that operate on iterators, much like the <algorithm> header.
//
// Note: If the algorithm is not general-purpose and/or doesn't operate on iterators,
// it should probably not be placed within this header.
namespace Common {


@@ -28,18 +28,14 @@ __declspec(noinline, noreturn)
}
#define ASSERT(_a_) \
do \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
while (0)
if (!(_a_)) { \
LOG_CRITICAL(Debug, "Assertion Failed!"); \
}
#define ASSERT_MSG(_a_, ...) \
do \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
} \
while (0)
if (!(_a_)) { \
LOG_CRITICAL(Debug, "Assertion Failed! " __VA_ARGS__); \
}
#define UNREACHABLE() ASSERT_MSG(false, "Unreachable code!")
#define UNREACHABLE_MSG(...) ASSERT_MSG(false, __VA_ARGS__)


@@ -168,11 +168,11 @@ public:
constexpr BitField(BitField&&) noexcept = default;
constexpr BitField& operator=(BitField&&) noexcept = default;
constexpr FORCE_INLINE operator T() const {
constexpr operator T() const {
return Value();
}
constexpr FORCE_INLINE void Assign(const T& value) {
constexpr void Assign(const T& value) {
storage = (static_cast<StorageType>(storage) & ~mask) | FormatValue(value);
}


@@ -6,6 +6,8 @@
#include <cstddef>
#include <cstring>
#include <utility>
#include <boost/functional/hash.hpp>
#include "common/cityhash.h"
#include "common/common_types.h"
@@ -68,4 +70,13 @@ struct HashableStruct {
}
};
struct PairHash {
template <class T1, class T2>
std::size_t operator()(const std::pair<T1, T2>& pair) const noexcept {
std::size_t seed = std::hash<T1>()(pair.first);
boost::hash_combine(seed, std::hash<T2>()(pair.second));
return seed;
}
};
} // namespace Common


@@ -304,6 +304,13 @@ public:
return levels[priority == Depth ? 63 : priority].back();
}
void clear() {
used_priorities = 0;
for (std::size_t i = 0; i < Depth; i++) {
levels[i].clear();
}
}
private:
using const_list_iterator = typename std::list<T>::const_iterator;


@@ -74,10 +74,24 @@ add_library(core STATIC
file_sys/sdmc_factory.h
file_sys/submission_package.cpp
file_sys/submission_package.h
file_sys/system_archive/data/font_chinese_simplified.cpp
file_sys/system_archive/data/font_chinese_simplified.h
file_sys/system_archive/data/font_chinese_traditional.cpp
file_sys/system_archive/data/font_chinese_traditional.h
file_sys/system_archive/data/font_extended_chinese_simplified.cpp
file_sys/system_archive/data/font_extended_chinese_simplified.h
file_sys/system_archive/data/font_korean.cpp
file_sys/system_archive/data/font_korean.h
file_sys/system_archive/data/font_nintendo_extended.cpp
file_sys/system_archive/data/font_nintendo_extended.h
file_sys/system_archive/data/font_standard.cpp
file_sys/system_archive/data/font_standard.h
file_sys/system_archive/mii_model.cpp
file_sys/system_archive/mii_model.h
file_sys/system_archive/ng_word.cpp
file_sys/system_archive/ng_word.h
file_sys/system_archive/shared_font.cpp
file_sys/system_archive/shared_font.h
file_sys/system_archive/system_archive.cpp
file_sys/system_archive/system_archive.h
file_sys/system_archive/system_version.cpp
@@ -511,7 +525,7 @@ add_library(core STATIC
create_target_directory_groups(core)
target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt json-headers mbedtls opus unicorn open_source_archives)
target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt json-headers mbedtls opus unicorn)
if (YUZU_ENABLE_BOXCAT)
get_directory_property(OPENSSL_LIBS


@@ -116,7 +116,7 @@ public:
num_interpreted_instructions = 0;
}
u64 GetTicksRemaining() override {
return std::max(parent.system.CoreTiming().GetDowncount(), 0);
return std::max(parent.system.CoreTiming().GetDowncount(), s64{0});
}
u64 GetCNTPCT() override {
return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks());


@@ -156,7 +156,7 @@ void ARM_Unicorn::Run() {
if (GDBStub::IsServerEnabled()) {
ExecuteInstructions(std::max(4000000, 0));
} else {
ExecuteInstructions(std::max(system.CoreTiming().GetDowncount(), 0));
ExecuteInstructions(std::max(system.CoreTiming().GetDowncount(), s64{0}));
}
}


@@ -112,8 +112,8 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
}
struct System::Impl {
explicit Impl(System& system)
: kernel{system}, fs_controller{system}, cpu_core_manager{system},
applet_manager{system}, reporter{system} {}
: kernel{system}, fs_controller{system}, cpu_core_manager{system}, reporter{system},
applet_manager{system} {}
Cpu& CurrentCpuCore() {
return cpu_core_manager.GetCurrentCore();
@@ -240,22 +240,27 @@ struct System::Impl {
}
void Shutdown() {
// Log last frame performance stats
const auto perf_results = GetAndResetPerfStats();
telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_EmulationSpeed",
perf_results.emulation_speed * 100.0);
telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_Framerate",
perf_results.game_fps);
telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_Frametime",
perf_results.frametime * 1000.0);
telemetry_session->AddField(Telemetry::FieldType::Performance, "Mean_Frametime_MS",
perf_stats->GetMeanFrametime());
// Log last frame performance stats if game was loded
if (perf_stats) {
const auto perf_results = GetAndResetPerfStats();
telemetry_session->AddField(Telemetry::FieldType::Performance,
"Shutdown_EmulationSpeed",
perf_results.emulation_speed * 100.0);
telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_Framerate",
perf_results.game_fps);
telemetry_session->AddField(Telemetry::FieldType::Performance, "Shutdown_Frametime",
perf_results.frametime * 1000.0);
telemetry_session->AddField(Telemetry::FieldType::Performance, "Mean_Frametime_MS",
perf_stats->GetMeanFrametime());
}
lm_manager.Flush();
is_powered_on = false;
exit_lock = false;
gpu_core->WaitIdle();
// Shutdown emulation session
renderer.reset();
GDBStub::Shutdown();
@@ -404,6 +409,12 @@ void System::PrepareReschedule() {
CurrentCpuCore().PrepareReschedule();
}
void System::PrepareReschedule(const u32 core_index) {
if (core_index < GlobalScheduler().CpuCoresCount()) {
CpuCore(core_index).PrepareReschedule();
}
}
PerfStatsResults System::GetAndResetPerfStats() {
return impl->GetAndResetPerfStats();
}
@@ -444,6 +455,16 @@ const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
return CpuCore(core_index).Scheduler();
}
/// Gets the global scheduler
Kernel::GlobalScheduler& System::GlobalScheduler() {
return impl->kernel.GlobalScheduler();
}
/// Gets the global scheduler
const Kernel::GlobalScheduler& System::GlobalScheduler() const {
return impl->kernel.GlobalScheduler();
}
Kernel::Process* System::CurrentProcess() {
return impl->kernel.CurrentProcess();
}


@@ -24,6 +24,7 @@ class VfsFilesystem;
} // namespace FileSys
namespace Kernel {
class GlobalScheduler;
class KernelCore;
class Process;
class Scheduler;
@@ -184,6 +185,9 @@ public:
/// Prepare the core emulation for a reschedule
void PrepareReschedule();
/// Prepare the core emulation for a reschedule
void PrepareReschedule(u32 core_index);
/// Gets and resets core performance statistics
PerfStatsResults GetAndResetPerfStats();
@@ -238,6 +242,12 @@ public:
/// Gets the scheduler for the CPU core with the specified index
const Kernel::Scheduler& Scheduler(std::size_t core_index) const;
/// Gets the global scheduler
Kernel::GlobalScheduler& GlobalScheduler();
/// Gets the global scheduler
const Kernel::GlobalScheduler& GlobalScheduler() const;
/// Provides a pointer to the current process
Kernel::Process* CurrentProcess();


@@ -52,7 +52,8 @@ bool CpuBarrier::Rendezvous() {
Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier,
std::size_t core_index)
: cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} {
: cpu_barrier{cpu_barrier}, global_scheduler{system.GlobalScheduler()},
core_timing{system.CoreTiming()}, core_index{core_index} {
#ifdef ARCHITECTURE_x86_64
arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index);
#else
@@ -60,7 +61,7 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba
LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
#endif
scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface);
scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface, core_index);
}
Cpu::~Cpu() = default;
@@ -81,29 +82,21 @@ void Cpu::RunLoop(bool tight_loop) {
return;
}
Reschedule();
// If we don't have a currently active thread then don't execute instructions,
// instead advance to the next event and try to yield to the next thread
if (Kernel::GetCurrentThread() == nullptr) {
LOG_TRACE(Core, "Core-{} idling", core_index);
if (IsMainCore()) {
// TODO(Subv): Only let CoreTiming idle if all 4 cores are idling.
core_timing.Idle();
core_timing.Advance();
}
PrepareReschedule();
core_timing.Idle();
} else {
if (IsMainCore()) {
core_timing.Advance();
}
if (tight_loop) {
arm_interface->Run();
} else {
arm_interface->Step();
}
}
core_timing.Advance();
Reschedule();
}
@@ -114,18 +107,18 @@ void Cpu::SingleStep() {
void Cpu::PrepareReschedule() {
arm_interface->PrepareReschedule();
reschedule_pending = true;
}
void Cpu::Reschedule() {
if (!reschedule_pending) {
return;
}
reschedule_pending = false;
// Lock the global kernel mutex when we manipulate the HLE state
std::lock_guard lock{HLE::g_hle_lock};
scheduler->Reschedule();
std::lock_guard lock(HLE::g_hle_lock);
global_scheduler.SelectThread(core_index);
scheduler->TryDoContextSwitch();
}
void Cpu::Shutdown() {
scheduler->Shutdown();
}
} // namespace Core


@@ -12,8 +12,9 @@
#include "common/common_types.h"
namespace Kernel {
class GlobalScheduler;
class Scheduler;
}
} // namespace Kernel
namespace Core {
class System;
@@ -83,6 +84,8 @@ public:
return core_index;
}
void Shutdown();
static std::unique_ptr<ExclusiveMonitor> MakeExclusiveMonitor(std::size_t num_cores);
private:
@@ -90,6 +93,7 @@ private:
std::unique_ptr<ARM_Interface> arm_interface;
CpuBarrier& cpu_barrier;
Kernel::GlobalScheduler& global_scheduler;
std::unique_ptr<Kernel::Scheduler> scheduler;
Timing::CoreTiming& core_timing;


@@ -15,7 +15,7 @@
namespace Core::Timing {
constexpr int MAX_SLICE_LENGTH = 20000;
constexpr int MAX_SLICE_LENGTH = 10000;
struct CoreTiming::Event {
s64 time;
@@ -38,10 +38,12 @@ CoreTiming::CoreTiming() = default;
CoreTiming::~CoreTiming() = default;
void CoreTiming::Initialize() {
downcount = MAX_SLICE_LENGTH;
downcounts.fill(MAX_SLICE_LENGTH);
time_slice.fill(MAX_SLICE_LENGTH);
slice_length = MAX_SLICE_LENGTH;
global_timer = 0;
idled_cycles = 0;
current_context = 0;
// The time between CoreTiming being initialized and the first call to Advance() is considered
// the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
@@ -110,7 +112,7 @@ void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
u64 CoreTiming::GetTicks() const {
u64 ticks = static_cast<u64>(global_timer);
if (!is_global_timer_sane) {
ticks += slice_length - downcount;
ticks += accumulated_ticks;
}
return ticks;
}
@@ -120,7 +122,8 @@ u64 CoreTiming::GetIdleTicks() const {
}
void CoreTiming::AddTicks(u64 ticks) {
downcount -= static_cast<int>(ticks);
accumulated_ticks += ticks;
downcounts[current_context] -= static_cast<s64>(ticks);
}
void CoreTiming::ClearPendingEvents() {
@@ -141,22 +144,35 @@ void CoreTiming::RemoveEvent(const EventType* event_type) {
void CoreTiming::ForceExceptionCheck(s64 cycles) {
cycles = std::max<s64>(0, cycles);
if (downcount <= cycles) {
if (downcounts[current_context] <= cycles) {
return;
}
// downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int
// here. Account for cycles already executed by adjusting the g.slice_length
slice_length -= downcount - static_cast<int>(cycles);
downcount = static_cast<int>(cycles);
downcounts[current_context] = static_cast<int>(cycles);
}
std::optional<u64> CoreTiming::NextAvailableCore(const s64 needed_ticks) const {
const u64 original_context = current_context;
u64 next_context = (original_context + 1) % num_cpu_cores;
while (next_context != original_context) {
if (time_slice[next_context] >= needed_ticks) {
return {next_context};
} else if (time_slice[next_context] >= 0) {
return std::nullopt;
}
next_context = (next_context + 1) % num_cpu_cores;
}
return std::nullopt;
}
void CoreTiming::Advance() {
std::unique_lock<std::mutex> guard(inner_mutex);
const int cycles_executed = slice_length - downcount;
const u64 cycles_executed = accumulated_ticks;
time_slice[current_context] = std::max<s64>(0, time_slice[current_context] - accumulated_ticks);
global_timer += cycles_executed;
slice_length = MAX_SLICE_LENGTH;
is_global_timer_sane = true;
@@ -173,24 +189,46 @@ void CoreTiming::Advance() {
// Still events left (scheduled in the future)
if (!event_queue.empty()) {
slice_length = static_cast<int>(
std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH));
const s64 needed_ticks =
std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH);
const auto next_core = NextAvailableCore(needed_ticks);
if (next_core) {
downcounts[*next_core] = needed_ticks;
}
}
downcount = slice_length;
accumulated_ticks = 0;
downcounts[current_context] = time_slice[current_context];
}
void CoreTiming::ResetRun() {
downcounts.fill(MAX_SLICE_LENGTH);
time_slice.fill(MAX_SLICE_LENGTH);
current_context = 0;
// Still events left (scheduled in the future)
if (!event_queue.empty()) {
const s64 needed_ticks =
std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH);
downcounts[current_context] = needed_ticks;
}
is_global_timer_sane = false;
accumulated_ticks = 0;
}
void CoreTiming::Idle() {
idled_cycles += downcount;
downcount = 0;
accumulated_ticks += downcounts[current_context];
idled_cycles += downcounts[current_context];
downcounts[current_context] = 0;
}
std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE};
}
int CoreTiming::GetDowncount() const {
return downcount;
s64 CoreTiming::GetDowncount() const {
return downcounts[current_context];
}
} // namespace Core::Timing

View File

@@ -7,6 +7,7 @@
#include <chrono>
#include <functional>
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>
@@ -104,7 +105,19 @@ public:
std::chrono::microseconds GetGlobalTimeUs() const;
int GetDowncount() const;
void ResetRun();
s64 GetDowncount() const;
void SwitchContext(u64 new_context) {
current_context = new_context;
}
bool CanCurrentContextRun() const {
return time_slice[current_context] > 0;
}
std::optional<u64> NextAvailableCore(const s64 needed_ticks) const;
private:
struct Event;
@@ -112,10 +125,16 @@ private:
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
static constexpr u64 num_cpu_cores = 4;
s64 global_timer = 0;
s64 idled_cycles = 0;
int slice_length = 0;
int downcount = 0;
s64 slice_length = 0;
u64 accumulated_ticks = 0;
std::array<s64, num_cpu_cores> downcounts{};
// Slice of time assigned to each core per run.
std::array<s64, num_cpu_cores> time_slice{};
u64 current_context = 0;
// Are we in a function that has been called from Advance()
// If events are scheduled from a function that gets called from Advance(),

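Taken together, the two timing diffs above replace the single downcount with per-core downcounts and per-core time slices, plus accumulated_ticks to track work done since the last Advance(). The following is a minimal, hypothetical usage sketch of the reworked interface; it assumes the RegisterEvent/ScheduleEvent API already used elsewhere in this compare and is illustrative rather than a copy of any yuzu call site.

#include "core/core_timing.h"

void TimingSketch() {
    Core::Timing::CoreTiming core_timing;
    core_timing.Initialize();
    // Assumed callback signature: void(u64 userdata, s64 cycles_late).
    Core::Timing::EventType* event = core_timing.RegisterEvent(
        "ExampleEvent", [](u64, s64) { /* fires once 500 cycles have been consumed */ });
    core_timing.ScheduleEvent(500, event);
    core_timing.SwitchContext(0); // operate on core 0's downcount and time slice
    core_timing.AddTicks(500);    // guest work: bumps accumulated_ticks, drops downcounts[0]
    core_timing.Advance();        // consumes accumulated_ticks and dispatches due events
}

CanCurrentContextRun() and ResetRun() are what the reworked dispatcher loop in the next file uses to decide when a pass over all cores is finished.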
View File

@@ -6,6 +6,7 @@
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/core_timing.h"
#include "core/cpu_core_manager.h"
#include "core/gdbstub/gdbstub.h"
#include "core/settings.h"
@@ -57,6 +58,7 @@ void CpuCoreManager::Shutdown() {
thread_to_cpu.clear();
for (auto& cpu_core : cores) {
cpu_core->Shutdown();
cpu_core.reset();
}
@@ -122,13 +124,19 @@ void CpuCoreManager::RunLoop(bool tight_loop) {
}
}
for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) {
cores[active_core]->RunLoop(tight_loop);
if (Settings::values.use_multi_core) {
// Cores 1-3 are run on other threads in this mode
break;
auto& core_timing = system.CoreTiming();
core_timing.ResetRun();
bool keep_running{};
do {
keep_running = false;
for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) {
core_timing.SwitchContext(active_core);
if (core_timing.CanCurrentContextRun()) {
cores[active_core]->RunLoop(tight_loop);
}
keep_running |= core_timing.CanCurrentContextRun();
}
}
} while (keep_running);
if (GDBStub::IsServerEnabled()) {
GDBStub::SetCpuStepFlag(false);


View File

@@ -31,7 +31,7 @@ constexpr std::array partition_names{
XCI::XCI(VirtualFile file_)
: file(std::move(file_)), program_nca_status{Loader::ResultStatus::ErrorXCIMissingProgramNCA},
partitions(partition_names.size()) {
partitions(partition_names.size()), partitions_raw(partition_names.size()) {
if (file->ReadObject(&header) != sizeof(GamecardHeader)) {
status = Loader::ResultStatus::ErrorBadXCIHeader;
return;
@@ -42,8 +42,10 @@ XCI::XCI(VirtualFile file_)
return;
}
PartitionFilesystem main_hfs(
std::make_shared<OffsetVfsFile>(file, header.hfs_size, header.hfs_offset));
PartitionFilesystem main_hfs(std::make_shared<OffsetVfsFile>(
file, file->GetSize() - header.hfs_offset, header.hfs_offset));
update_normal_partition_end = main_hfs.GetFileOffsets()["secure"];
if (main_hfs.GetStatus() != Loader::ResultStatus::Success) {
status = main_hfs.GetStatus();
@@ -55,9 +57,7 @@ XCI::XCI(VirtualFile file_)
const auto partition_idx = static_cast<std::size_t>(partition);
auto raw = main_hfs.GetFile(partition_names[partition_idx]);
if (raw != nullptr) {
partitions[partition_idx] = std::make_shared<PartitionFilesystem>(std::move(raw));
}
partitions_raw[static_cast<std::size_t>(partition)] = std::move(raw);
}
secure_partition = std::make_shared<NSP>(
@@ -71,13 +71,7 @@ XCI::XCI(VirtualFile file_)
program_nca_status = Loader::ResultStatus::ErrorXCIMissingProgramNCA;
}
auto result = AddNCAFromPartition(XCIPartition::Update);
if (result != Loader::ResultStatus::Success) {
status = result;
return;
}
result = AddNCAFromPartition(XCIPartition::Normal);
auto result = AddNCAFromPartition(XCIPartition::Normal);
if (result != Loader::ResultStatus::Success) {
status = result;
return;
@@ -104,34 +98,114 @@ Loader::ResultStatus XCI::GetProgramNCAStatus() const {
return program_nca_status;
}
VirtualDir XCI::GetPartition(XCIPartition partition) const {
VirtualDir XCI::GetPartition(XCIPartition partition) {
const auto id = static_cast<std::size_t>(partition);
if (partitions[id] == nullptr && partitions_raw[id] != nullptr) {
partitions[id] = std::make_shared<PartitionFilesystem>(partitions_raw[id]);
}
return partitions[static_cast<std::size_t>(partition)];
}
std::vector<VirtualDir> XCI::GetPartitions() {
std::vector<VirtualDir> out;
for (const auto& id :
{XCIPartition::Update, XCIPartition::Normal, XCIPartition::Secure, XCIPartition::Logo}) {
const auto part = GetPartition(id);
if (part != nullptr) {
out.push_back(part);
}
}
return out;
}
std::shared_ptr<NSP> XCI::GetSecurePartitionNSP() const {
return secure_partition;
}
VirtualDir XCI::GetSecurePartition() const {
VirtualDir XCI::GetSecurePartition() {
return GetPartition(XCIPartition::Secure);
}
VirtualDir XCI::GetNormalPartition() const {
VirtualDir XCI::GetNormalPartition() {
return GetPartition(XCIPartition::Normal);
}
VirtualDir XCI::GetUpdatePartition() const {
VirtualDir XCI::GetUpdatePartition() {
return GetPartition(XCIPartition::Update);
}
VirtualDir XCI::GetLogoPartition() const {
VirtualDir XCI::GetLogoPartition() {
return GetPartition(XCIPartition::Logo);
}
VirtualFile XCI::GetPartitionRaw(XCIPartition partition) const {
return partitions_raw[static_cast<std::size_t>(partition)];
}
VirtualFile XCI::GetSecurePartitionRaw() const {
return GetPartitionRaw(XCIPartition::Secure);
}
VirtualFile XCI::GetStoragePartition0() const {
return std::make_shared<OffsetVfsFile>(file, update_normal_partition_end, 0, "partition0");
}
VirtualFile XCI::GetStoragePartition1() const {
return std::make_shared<OffsetVfsFile>(file, file->GetSize() - update_normal_partition_end,
update_normal_partition_end, "partition1");
}
VirtualFile XCI::GetNormalPartitionRaw() const {
return GetPartitionRaw(XCIPartition::Normal);
}
VirtualFile XCI::GetUpdatePartitionRaw() const {
return GetPartitionRaw(XCIPartition::Update);
}
VirtualFile XCI::GetLogoPartitionRaw() const {
return GetPartitionRaw(XCIPartition::Logo);
}
u64 XCI::GetProgramTitleID() const {
return secure_partition->GetProgramTitleID();
}
u32 XCI::GetSystemUpdateVersion() {
const auto update = GetPartition(XCIPartition::Update);
if (update == nullptr)
return 0;
for (const auto& file : update->GetFiles()) {
NCA nca{file, nullptr, 0, keys};
if (nca.GetStatus() != Loader::ResultStatus::Success)
continue;
if (nca.GetType() == NCAContentType::Meta && nca.GetTitleId() == 0x0100000000000816) {
const auto dir = nca.GetSubdirectories()[0];
const auto cnmt = dir->GetFile("SystemUpdate_0100000000000816.cnmt");
if (cnmt == nullptr)
continue;
CNMT cnmt_data{cnmt};
const auto metas = cnmt_data.GetMetaRecords();
if (metas.empty())
continue;
return metas[0].title_version;
}
}
return 0;
}
u64 XCI::GetSystemUpdateTitleID() const {
return 0x0100000000000816;
}
bool XCI::HasProgramNCA() const {
return program != nullptr;
}
@@ -201,7 +275,7 @@ std::array<u8, 0x200> XCI::GetCertificate() const {
Loader::ResultStatus XCI::AddNCAFromPartition(XCIPartition part) {
const auto partition_index = static_cast<std::size_t>(part);
const auto& partition = partitions[partition_index];
const auto partition = GetPartition(part);
if (partition == nullptr) {
return Loader::ResultStatus::ErrorXCIMissingPartition;
@@ -232,7 +306,7 @@ Loader::ResultStatus XCI::AddNCAFromPartition(XCIPartition part) {
return Loader::ResultStatus::Success;
}
u8 XCI::GetFormatVersion() const {
u8 XCI::GetFormatVersion() {
return GetLogoPartition() == nullptr ? 0x1 : 0x2;
}
} // namespace FileSys
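With this change the XCI keeps the raw HFS0 regions in partitions_raw and only parses a PartitionFilesystem the first time GetPartition() asks for it, which is why the partition accessors lose their const qualifier. A hedged usage sketch follows; 'file' is assumed to be a VirtualFile already opened on an XCI image.

#include "core/file_sys/card_image.h"

void XciSketch(FileSys::VirtualFile file) {
    FileSys::XCI xci(std::move(file));
    if (xci.GetStatus() != Loader::ResultStatus::Success) {
        return;
    }
    const auto secure = xci.GetSecurePartition();         // parsed on first call, cached after
    const auto secure_raw = xci.GetSecurePartitionRaw();  // raw region, never parsed
    const auto update_ver = xci.GetSystemUpdateVersion(); // walks the update partition's CNMT
}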

View File

@@ -81,14 +81,24 @@ public:
Loader::ResultStatus GetStatus() const;
Loader::ResultStatus GetProgramNCAStatus() const;
u8 GetFormatVersion() const;
u8 GetFormatVersion();
VirtualDir GetPartition(XCIPartition partition);
std::vector<VirtualDir> GetPartitions();
VirtualDir GetPartition(XCIPartition partition) const;
std::shared_ptr<NSP> GetSecurePartitionNSP() const;
VirtualDir GetSecurePartition() const;
VirtualDir GetNormalPartition() const;
VirtualDir GetUpdatePartition() const;
VirtualDir GetLogoPartition() const;
VirtualDir GetSecurePartition();
VirtualDir GetNormalPartition();
VirtualDir GetUpdatePartition();
VirtualDir GetLogoPartition();
VirtualFile GetPartitionRaw(XCIPartition partition) const;
VirtualFile GetSecurePartitionRaw() const;
VirtualFile GetStoragePartition0() const;
VirtualFile GetStoragePartition1() const;
VirtualFile GetNormalPartitionRaw() const;
VirtualFile GetUpdatePartitionRaw() const;
VirtualFile GetLogoPartitionRaw() const;
u64 GetProgramTitleID() const;
u32 GetSystemUpdateVersion();
@@ -123,6 +133,7 @@ private:
Loader::ResultStatus program_nca_status;
std::vector<VirtualDir> partitions;
std::vector<VirtualFile> partitions_raw;
std::shared_ptr<NSP> secure_partition;
std::shared_ptr<NCA> program;
std::vector<std::shared_ptr<NCA>> ncas;

View File

@@ -65,6 +65,9 @@ PartitionFilesystem::PartitionFilesystem(std::shared_ptr<VfsFile> file) {
std::string name(
reinterpret_cast<const char*>(&file_data[strtab_offset + entry.strtab_offset]));
offsets.insert_or_assign(name, content_offset + entry.offset);
sizes.insert_or_assign(name, entry.size);
pfs_files.emplace_back(std::make_shared<OffsetVfsFile>(
file, entry.size, content_offset + entry.offset, std::move(name)));
}
@@ -78,6 +81,14 @@ Loader::ResultStatus PartitionFilesystem::GetStatus() const {
return status;
}
std::map<std::string, u64> PartitionFilesystem::GetFileOffsets() const {
return offsets;
}
std::map<std::string, u64> PartitionFilesystem::GetFileSizes() const {
return sizes;
}
std::vector<std::shared_ptr<VfsFile>> PartitionFilesystem::GetFiles() const {
return pfs_files;
}

View File

@@ -29,6 +29,9 @@ public:
Loader::ResultStatus GetStatus() const;
std::map<std::string, u64> GetFileOffsets() const;
std::map<std::string, u64> GetFileSizes() const;
std::vector<std::shared_ptr<VfsFile>> GetFiles() const override;
std::vector<std::shared_ptr<VfsDirectory>> GetSubdirectories() const override;
std::string GetName() const override;
@@ -80,6 +83,9 @@ private:
bool is_hfs = false;
std::size_t content_offset = 0;
std::map<std::string, u64> offsets;
std::map<std::string, u64> sizes;
std::vector<VirtualFile> pfs_files;
};
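The new offset and size maps expose the container's raw layout; the XCI diff above uses GetFileOffsets()["secure"] to find where the cartridge's update/normal storage ends. A small sketch, assuming 'file' is a VirtualFile holding a PFS0/HFS0 image:

#include "common/logging/log.h"
#include "core/file_sys/partition_filesystem.h"

void PfsLayoutSketch(FileSys::VirtualFile file) {
    FileSys::PartitionFilesystem pfs(std::move(file));
    if (pfs.GetStatus() != Loader::ResultStatus::Success) {
        return;
    }
    const auto offsets = pfs.GetFileOffsets();
    const auto sizes = pfs.GetFileSizes();
    for (const auto& [name, offset] : offsets) {
        // Each entry's absolute offset within the container, plus its size.
        LOG_DEBUG(Service_FS, "{}: offset=0x{:X} size=0x{:X}", name, offset, sizes.at(name));
    }
}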

View File

@@ -16,6 +16,7 @@ namespace FileSys {
constexpr char SAVE_DATA_SIZE_FILENAME[] = ".yuzu_save_size";
namespace {
void PrintSaveDataDescriptorWarnings(SaveDataDescriptor meta) {
if (meta.type == SaveDataType::SystemSaveData || meta.type == SaveDataType::SaveData) {
if (meta.zero_1 != 0) {
@@ -52,6 +53,13 @@ void PrintSaveDataDescriptorWarnings(SaveDataDescriptor meta) {
meta.user_id[1], meta.user_id[0]);
}
}
bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataDescriptor& desc) {
return desc.type == SaveDataType::CacheStorage || desc.type == SaveDataType::TemporaryStorage ||
(space == SaveDataSpaceId::NandUser && ///< Normal Save Data -- Current Title & User
desc.type == SaveDataType::SaveData && desc.title_id == 0 && desc.save_id == 0);
}
} // Anonymous namespace
std::string SaveDataDescriptor::DebugInfo() const {
@@ -96,6 +104,10 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
auto out = dir->GetDirectoryRelative(save_directory);
if (out == nullptr && ShouldSaveDataBeAutomaticallyCreated(space, meta)) {
return Create(space, meta);
}
// Return an error if the save data doesn't actually exist.
if (out == nullptr) {
// TODO(Subv): Find out correct error code.

File diff suppressed because it is too large

View File

@@ -0,0 +1,13 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
namespace FileSys::SystemArchive::SharedFontData {
extern const std::array<unsigned char, 217276> FONT_CHINESE_SIMPLIFIED;
} // namespace FileSys::SystemArchive::SharedFontData

File diff suppressed because it is too large

View File

@@ -0,0 +1,13 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
namespace FileSys::SystemArchive::SharedFontData {
extern const std::array<unsigned char, 222236> FONT_CHINESE_TRADITIONAL;
} // namespace FileSys::SystemArchive::SharedFontData

File diff suppressed because it is too large

View File

@@ -0,0 +1,13 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
namespace FileSys::SystemArchive::SharedFontData {
extern const std::array<unsigned char, 293516> FONT_EXTENDED_CHINESE_SIMPLIFIED;
} // namespace FileSys::SystemArchive::SharedFontData

File diff suppressed because it is too large

View File

@@ -0,0 +1,13 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
namespace FileSys::SystemArchive::SharedFontData {
extern const std::array<unsigned char, 217276> FONT_KOREAN;
} // namespace FileSys::SystemArchive::SharedFontData

View File

@@ -0,0 +1,196 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/file_sys/system_archive/data/font_nintendo_extended.h"
namespace FileSys::SystemArchive::SharedFontData {
const std::array<unsigned char, 2932> FONT_NINTENDO_EXTENDED{{
0x00, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x80, 0x00, 0x03, 0x00, 0x70, 0x44, 0x53, 0x49, 0x47,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0b, 0x6c, 0x00, 0x00, 0x00, 0x08, 0x4f, 0x53, 0x2f, 0x32,
0x33, 0x86, 0x1d, 0x9b, 0x00, 0x00, 0x01, 0x78, 0x00, 0x00, 0x00, 0x60, 0x63, 0x6d, 0x61, 0x70,
0xc2, 0x06, 0x20, 0xde, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x63, 0x76, 0x74, 0x20,
0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x04, 0x2c, 0x00, 0x00, 0x00, 0x06, 0x66, 0x70, 0x67, 0x6d,
0x06, 0x59, 0x9c, 0x37, 0x00, 0x00, 0x02, 0xa0, 0x00, 0x00, 0x01, 0x73, 0x67, 0x61, 0x73, 0x70,
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x0b, 0x64, 0x00, 0x00, 0x00, 0x08, 0x67, 0x6c, 0x79, 0x66,
0x10, 0x31, 0x88, 0x00, 0x00, 0x00, 0x04, 0x34, 0x00, 0x00, 0x04, 0x64, 0x68, 0x65, 0x61, 0x64,
0x15, 0x9d, 0xef, 0x91, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, 0x36, 0x68, 0x68, 0x65, 0x61,
0x09, 0x60, 0x03, 0x71, 0x00, 0x00, 0x01, 0x34, 0x00, 0x00, 0x00, 0x24, 0x68, 0x6d, 0x74, 0x78,
0x0d, 0x2e, 0x03, 0xa7, 0x00, 0x00, 0x01, 0xd8, 0x00, 0x00, 0x00, 0x26, 0x6c, 0x6f, 0x63, 0x61,
0x05, 0xc0, 0x04, 0x6c, 0x00, 0x00, 0x08, 0x98, 0x00, 0x00, 0x00, 0x1e, 0x6d, 0x61, 0x78, 0x70,
0x02, 0x1c, 0x00, 0x5f, 0x00, 0x00, 0x01, 0x58, 0x00, 0x00, 0x00, 0x20, 0x6e, 0x61, 0x6d, 0x65,
0x7c, 0xe0, 0x84, 0x5c, 0x00, 0x00, 0x08, 0xb8, 0x00, 0x00, 0x02, 0x09, 0x70, 0x6f, 0x73, 0x74,
0x47, 0x4e, 0x74, 0x19, 0x00, 0x00, 0x0a, 0xc4, 0x00, 0x00, 0x00, 0x9e, 0x70, 0x72, 0x65, 0x70,
0x1c, 0xfc, 0x7d, 0x9c, 0x00, 0x00, 0x04, 0x14, 0x00, 0x00, 0x00, 0x16, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00, 0x7c, 0xc7, 0xb1, 0x63, 0x5f, 0x0f, 0x3c, 0xf5, 0x00, 0x1b, 0x03, 0xe8,
0x00, 0x00, 0x00, 0x00, 0xd9, 0x44, 0x2f, 0x5d, 0x00, 0x00, 0x00, 0x00, 0xd9, 0x45, 0x7b, 0x69,
0x00, 0x00, 0x00, 0x00, 0x03, 0xe6, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x06, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0x84, 0xff, 0x83, 0x01, 0xf4, 0x03, 0xe8,
0x00, 0x00, 0x00, 0x00, 0x03, 0xe6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x5e,
0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x03, 0x74, 0x01, 0x90, 0x00, 0x05,
0x00, 0x04, 0x00, 0xcd, 0x00, 0xcd, 0x00, 0x00, 0x01, 0x1f, 0x00, 0xcd, 0x00, 0xcd, 0x00, 0x00,
0x03, 0xc3, 0x00, 0x66, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x00, 0xc0, 0x00, 0x00, 0xe0, 0xe9, 0x03, 0x84, 0xff, 0x83,
0x01, 0xf4, 0x02, 0xee, 0x00, 0xfa, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8,
0x02, 0xbc, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xfa, 0x00, 0x00, 0x00, 0xfa, 0x00, 0x00, 0x03, 0xe8, 0x00, 0xeb, 0x01, 0x21, 0x00, 0xff,
0x00, 0xff, 0x01, 0x3d, 0x01, 0x17, 0x00, 0x42, 0x00, 0x1c, 0x00, 0x3e, 0x00, 0x17, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x68, 0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1c, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x68, 0x00, 0x06, 0x00, 0x4c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x04, 0x00, 0x38, 0x00, 0x00, 0x00, 0x0a,
0x00, 0x08, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x20, 0xe0, 0xe9, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x20, 0xe0, 0xe0, 0xff, 0xff, 0x00, 0x01, 0xff, 0xf5,
0xff, 0xe3, 0x1f, 0x24, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xb8, 0x00, 0x00, 0x2c, 0x4b, 0xb8, 0x00, 0x09, 0x50, 0x58, 0xb1, 0x01, 0x01, 0x8e, 0x59, 0xb8,
0x01, 0xff, 0x85, 0xb8, 0x00, 0x44, 0x1d, 0xb9, 0x00, 0x09, 0x00, 0x03, 0x5f, 0x5e, 0x2d, 0xb8,
0x00, 0x01, 0x2c, 0x20, 0x20, 0x45, 0x69, 0x44, 0xb0, 0x01, 0x60, 0x2d, 0xb8, 0x00, 0x02, 0x2c,
0xb8, 0x00, 0x01, 0x2a, 0x21, 0x2d, 0xb8, 0x00, 0x03, 0x2c, 0x20, 0x46, 0xb0, 0x03, 0x25, 0x46,
0x52, 0x58, 0x23, 0x59, 0x20, 0x8a, 0x20, 0x8a, 0x49, 0x64, 0x8a, 0x20, 0x46, 0x20, 0x68, 0x61,
0x64, 0xb0, 0x04, 0x25, 0x46, 0x20, 0x68, 0x61, 0x64, 0x52, 0x58, 0x23, 0x65, 0x8a, 0x59, 0x2f,
0x20, 0xb0, 0x00, 0x53, 0x58, 0x69, 0x20, 0xb0, 0x00, 0x54, 0x58, 0x21, 0xb0, 0x40, 0x59, 0x1b,
0x69, 0x20, 0xb0, 0x00, 0x54, 0x58, 0x21, 0xb0, 0x40, 0x65, 0x59, 0x59, 0x3a, 0x2d, 0xb8, 0x00,
0x04, 0x2c, 0x20, 0x46, 0xb0, 0x04, 0x25, 0x46, 0x52, 0x58, 0x23, 0x8a, 0x59, 0x20, 0x46, 0x20,
0x6a, 0x61, 0x64, 0xb0, 0x04, 0x25, 0x46, 0x20, 0x6a, 0x61, 0x64, 0x52, 0x58, 0x23, 0x8a, 0x59,
0x2f, 0xfd, 0x2d, 0xb8, 0x00, 0x05, 0x2c, 0x4b, 0x20, 0xb0, 0x03, 0x26, 0x50, 0x58, 0x51, 0x58,
0xb0, 0x80, 0x44, 0x1b, 0xb0, 0x40, 0x44, 0x59, 0x1b, 0x21, 0x21, 0x20, 0x45, 0xb0, 0xc0, 0x50,
0x58, 0xb0, 0xc0, 0x44, 0x1b, 0x21, 0x59, 0x59, 0x2d, 0xb8, 0x00, 0x06, 0x2c, 0x20, 0x20, 0x45,
0x69, 0x44, 0xb0, 0x01, 0x60, 0x20, 0x20, 0x45, 0x7d, 0x69, 0x18, 0x44, 0xb0, 0x01, 0x60, 0x2d,
0xb8, 0x00, 0x07, 0x2c, 0xb8, 0x00, 0x06, 0x2a, 0x2d, 0xb8, 0x00, 0x08, 0x2c, 0x4b, 0x20, 0xb0,
0x03, 0x26, 0x53, 0x58, 0xb0, 0x40, 0x1b, 0xb0, 0x00, 0x59, 0x8a, 0x8a, 0x20, 0xb0, 0x03, 0x26,
0x53, 0x58, 0x23, 0x21, 0xb0, 0x80, 0x8a, 0x8a, 0x1b, 0x8a, 0x23, 0x59, 0x20, 0xb0, 0x03, 0x26,
0x53, 0x58, 0x23, 0x21, 0xb8, 0x00, 0xc0, 0x8a, 0x8a, 0x1b, 0x8a, 0x23, 0x59, 0x20, 0xb0, 0x03,
0x26, 0x53, 0x58, 0x23, 0x21, 0xb8, 0x01, 0x00, 0x8a, 0x8a, 0x1b, 0x8a, 0x23, 0x59, 0x20, 0xb0,
0x03, 0x26, 0x53, 0x58, 0x23, 0x21, 0xb8, 0x01, 0x40, 0x8a, 0x8a, 0x1b, 0x8a, 0x23, 0x59, 0x20,
0xb8, 0x00, 0x03, 0x26, 0x53, 0x58, 0xb0, 0x03, 0x25, 0x45, 0xb8, 0x01, 0x80, 0x50, 0x58, 0x23,
0x21, 0xb8, 0x01, 0x80, 0x23, 0x21, 0x1b, 0xb0, 0x03, 0x25, 0x45, 0x23, 0x21, 0x23, 0x21, 0x59,
0x1b, 0x21, 0x59, 0x44, 0x2d, 0xb8, 0x00, 0x09, 0x2c, 0x4b, 0x53, 0x58, 0x45, 0x44, 0x1b, 0x21,
0x21, 0x59, 0x2d, 0x00, 0xb8, 0x00, 0x00, 0x2b, 0x00, 0xba, 0x00, 0x01, 0x00, 0x01, 0x00, 0x07,
0x2b, 0xb8, 0x00, 0x00, 0x20, 0x45, 0x7d, 0x69, 0x18, 0x44, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x03, 0xe6, 0x03, 0xe8, 0x00, 0x06,
0x00, 0x00, 0x35, 0x01, 0x33, 0x15, 0x01, 0x23, 0x35, 0x03, 0x52, 0x94, 0xfc, 0xa6, 0x8c, 0x90,
0x03, 0x58, 0x86, 0xfc, 0xa0, 0x8e, 0x00, 0x00, 0x00, 0x02, 0x00, 0xeb, 0x00, 0xcc, 0x02, 0xfb,
0x03, 0x1e, 0x00, 0x08, 0x00, 0x0f, 0x00, 0x00, 0x01, 0x33, 0x13, 0x23, 0x27, 0x23, 0x07, 0x23,
0x13, 0x17, 0x07, 0x06, 0x15, 0x33, 0x27, 0x07, 0x01, 0xbc, 0x6d, 0xd2, 0x7c, 0x26, 0xcc, 0x26,
0x7c, 0xd1, 0x35, 0x40, 0x02, 0x89, 0x45, 0x02, 0x03, 0x1e, 0xfd, 0xae, 0x77, 0x77, 0x02, 0x52,
0x9b, 0xcc, 0x08, 0x04, 0xda, 0x02, 0x00, 0x00, 0x00, 0x03, 0x01, 0x21, 0x00, 0xcc, 0x02, 0xc5,
0x03, 0x1e, 0x00, 0x15, 0x00, 0x1f, 0x00, 0x2b, 0x00, 0x00, 0x25, 0x11, 0x33, 0x32, 0x1e, 0x02,
0x15, 0x14, 0x0e, 0x02, 0x07, 0x1e, 0x01, 0x15, 0x14, 0x0e, 0x02, 0x2b, 0x01, 0x13, 0x33, 0x32,
0x36, 0x35, 0x34, 0x26, 0x2b, 0x01, 0x1d, 0x01, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x26, 0x2b,
0x01, 0x15, 0x01, 0x21, 0xea, 0x25, 0x3f, 0x2e, 0x1a, 0x0e, 0x15, 0x1b, 0x0e, 0x2d, 0x2d, 0x1a,
0x2e, 0x3f, 0x25, 0xf8, 0x76, 0x62, 0x20, 0x2a, 0x28, 0x22, 0x62, 0x76, 0x10, 0x18, 0x11, 0x09,
0x22, 0x22, 0x74, 0xcc, 0x02, 0x52, 0x18, 0x2b, 0x3c, 0x24, 0x1d, 0x1f, 0x17, 0x17, 0x14, 0x0f,
0x48, 0x2f, 0x24, 0x3f, 0x2e, 0x1a, 0x01, 0x5b, 0x29, 0x20, 0x20, 0x2b, 0x94, 0xf8, 0x0e, 0x16,
0x1c, 0x0e, 0x1f, 0x31, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xff, 0x00, 0xcc, 0x02, 0xe7,
0x03, 0x1e, 0x00, 0x0c, 0x00, 0x00, 0x01, 0x33, 0x17, 0x37, 0x33, 0x03, 0x13, 0x23, 0x27, 0x07,
0x23, 0x13, 0x03, 0x01, 0x04, 0x86, 0x69, 0x69, 0x86, 0xa3, 0xa8, 0x88, 0x6c, 0x6c, 0x88, 0xa8,
0xa3, 0x03, 0x1e, 0xcb, 0xcb, 0xfe, 0xda, 0xfe, 0xd4, 0xcf, 0xcf, 0x01, 0x2c, 0x01, 0x26, 0x00,
0x00, 0x01, 0x00, 0xff, 0x00, 0xcc, 0x02, 0xe7, 0x03, 0x1e, 0x00, 0x0f, 0x00, 0x00, 0x01, 0x03,
0x33, 0x17, 0x32, 0x15, 0x1e, 0x01, 0x15, 0x1b, 0x01, 0x33, 0x03, 0x15, 0x23, 0x35, 0x01, 0xb8,
0xb9, 0x7e, 0x01, 0x01, 0x01, 0x03, 0x70, 0x75, 0x7f, 0xb9, 0x76, 0x01, 0xa3, 0x01, 0x7b, 0x01,
0x01, 0x01, 0x05, 0x02, 0xff, 0x00, 0x01, 0x0a, 0xfe, 0x85, 0xd7, 0xd7, 0x00, 0x01, 0x01, 0x3d,
0x00, 0xcc, 0x02, 0xa9, 0x03, 0x1e, 0x00, 0x06, 0x00, 0x00, 0x25, 0x11, 0x33, 0x11, 0x33, 0x15,
0x21, 0x01, 0x3d, 0x75, 0xf7, 0xfe, 0x94, 0xcc, 0x02, 0x52, 0xfe, 0x10, 0x62, 0x00, 0x00, 0x00,
0x00, 0x02, 0x01, 0x17, 0x00, 0xbc, 0x02, 0xcf, 0x03, 0x0e, 0x00, 0x15, 0x00, 0x21, 0x00, 0x00,
0x25, 0x11, 0x33, 0x32, 0x1e, 0x02, 0x1d, 0x01, 0x0e, 0x03, 0x1d, 0x01, 0x17, 0x15, 0x23, 0x27,
0x23, 0x15, 0x23, 0x13, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x26, 0x2b, 0x01, 0x15, 0x01, 0x17,
0xf4, 0x27, 0x40, 0x2e, 0x19, 0x01, 0x1f, 0x24, 0x1e, 0x78, 0x7d, 0x6a, 0x5c, 0x75, 0x76, 0x72,
0x12, 0x19, 0x11, 0x08, 0x26, 0x26, 0x6a, 0xbc, 0x02, 0x52, 0x1d, 0x31, 0x42, 0x25, 0x16, 0x18,
0x32, 0x2a, 0x1b, 0x02, 0x01, 0xef, 0x06, 0xd7, 0xd7, 0x01, 0x3f, 0x10, 0x1a, 0x1e, 0x0f, 0x23,
0x36, 0xb0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x42, 0x00, 0xbc, 0x03, 0xa4, 0x03, 0x0e, 0x00, 0x0a,
0x00, 0x11, 0x00, 0x00, 0x13, 0x35, 0x21, 0x15, 0x01, 0x21, 0x15, 0x21, 0x35, 0x01, 0x21, 0x01,
0x11, 0x33, 0x11, 0x33, 0x15, 0x21, 0x42, 0x01, 0xa7, 0xfe, 0xeb, 0x01, 0x1b, 0xfe, 0x53, 0x01,
0x15, 0xfe, 0xeb, 0x01, 0xf7, 0x75, 0xf6, 0xfe, 0x95, 0x02, 0xac, 0x62, 0x45, 0xfe, 0x55, 0x62,
0x47, 0x01, 0xa9, 0xfe, 0x10, 0x02, 0x52, 0xfe, 0x10, 0x62, 0x00, 0x00, 0x00, 0x03, 0x00, 0x1c,
0x00, 0xbc, 0x03, 0xca, 0x03, 0x0e, 0x00, 0x0a, 0x00, 0x21, 0x00, 0x2f, 0x00, 0x00, 0x13, 0x35,
0x21, 0x15, 0x01, 0x21, 0x15, 0x21, 0x35, 0x01, 0x21, 0x01, 0x11, 0x33, 0x32, 0x1e, 0x02, 0x15,
0x14, 0x06, 0x07, 0x0e, 0x03, 0x15, 0x17, 0x15, 0x23, 0x27, 0x23, 0x15, 0x23, 0x13, 0x33, 0x32,
0x3e, 0x02, 0x35, 0x34, 0x2e, 0x02, 0x2b, 0x01, 0x15, 0x1c, 0x01, 0xa7, 0xfe, 0xeb, 0x01, 0x1b,
0xfe, 0x53, 0x01, 0x15, 0xfe, 0xeb, 0x01, 0xf7, 0xf3, 0x27, 0x41, 0x2d, 0x19, 0x1c, 0x20, 0x01,
0x0d, 0x0e, 0x0a, 0x78, 0x7d, 0x69, 0x5c, 0x75, 0x76, 0x71, 0x11, 0x1a, 0x12, 0x09, 0x0a, 0x14,
0x1d, 0x13, 0x69, 0x02, 0xac, 0x62, 0x45, 0xfe, 0x55, 0x62, 0x47, 0x01, 0xa9, 0xfe, 0x10, 0x02,
0x52, 0x1d, 0x31, 0x42, 0x25, 0x2b, 0x44, 0x1d, 0x01, 0x08, 0x09, 0x07, 0x01, 0xf1, 0x06, 0xd7,
0xd7, 0x01, 0x3f, 0x11, 0x19, 0x1f, 0x0e, 0x11, 0x20, 0x19, 0x0f, 0xb0, 0x00, 0x02, 0x00, 0x3e,
0x00, 0xb3, 0x03, 0xa8, 0x03, 0x17, 0x00, 0x3a, 0x00, 0x41, 0x00, 0x00, 0x13, 0x34, 0x3e, 0x02,
0x33, 0x32, 0x1e, 0x02, 0x15, 0x23, 0x27, 0x34, 0x27, 0x2e, 0x01, 0x23, 0x22, 0x0e, 0x02, 0x15,
0x14, 0x16, 0x15, 0x1e, 0x05, 0x15, 0x14, 0x0e, 0x02, 0x23, 0x22, 0x2e, 0x02, 0x35, 0x33, 0x1e,
0x01, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x2e, 0x04, 0x35, 0x01, 0x11, 0x33, 0x11, 0x33, 0x15,
0x21, 0x50, 0x24, 0x3b, 0x4a, 0x27, 0x28, 0x4b, 0x39, 0x22, 0x73, 0x01, 0x01, 0x08, 0x2b, 0x29,
0x10, 0x20, 0x19, 0x0f, 0x01, 0x0b, 0x35, 0x41, 0x46, 0x3b, 0x25, 0x23, 0x3a, 0x4b, 0x27, 0x2b,
0x50, 0x3f, 0x26, 0x74, 0x05, 0x34, 0x33, 0x10, 0x20, 0x1a, 0x11, 0x2c, 0x42, 0x4d, 0x42, 0x2c,
0x01, 0xef, 0x73, 0xf6, 0xfe, 0x97, 0x02, 0x70, 0x2a, 0x3f, 0x2a, 0x14, 0x18, 0x2e, 0x44, 0x2c,
0x02, 0x03, 0x01, 0x27, 0x27, 0x07, 0x10, 0x1a, 0x12, 0x02, 0x0b, 0x02, 0x1f, 0x22, 0x19, 0x17,
0x27, 0x3f, 0x34, 0x2c, 0x3e, 0x28, 0x13, 0x1a, 0x32, 0x48, 0x2e, 0x30, 0x30, 0x06, 0x0f, 0x1a,
0x13, 0x21, 0x27, 0x1e, 0x1b, 0x29, 0x3e, 0x31, 0xfe, 0x4c, 0x02, 0x53, 0xfe, 0x10, 0x63, 0x00,
0x00, 0x03, 0x00, 0x17, 0x00, 0xb3, 0x03, 0xce, 0x03, 0x17, 0x00, 0x38, 0x00, 0x4f, 0x00, 0x5d,
0x00, 0x00, 0x13, 0x34, 0x3e, 0x02, 0x33, 0x32, 0x1e, 0x02, 0x15, 0x23, 0x27, 0x34, 0x23, 0x2e,
0x01, 0x23, 0x22, 0x0e, 0x02, 0x15, 0x14, 0x1e, 0x04, 0x15, 0x14, 0x0e, 0x02, 0x23, 0x22, 0x2e,
0x02, 0x35, 0x33, 0x1e, 0x01, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x26, 0x27, 0x2e, 0x03, 0x35,
0x01, 0x11, 0x33, 0x32, 0x1e, 0x02, 0x15, 0x14, 0x06, 0x07, 0x30, 0x0e, 0x02, 0x31, 0x17, 0x15,
0x23, 0x27, 0x23, 0x15, 0x23, 0x13, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x2e, 0x02, 0x2b, 0x01,
0x15, 0x2a, 0x24, 0x3a, 0x4a, 0x26, 0x29, 0x4b, 0x39, 0x23, 0x73, 0x01, 0x01, 0x08, 0x2a, 0x2a,
0x10, 0x1f, 0x1a, 0x10, 0x2c, 0x42, 0x4d, 0x42, 0x2c, 0x23, 0x39, 0x4b, 0x27, 0x2b, 0x51, 0x3f,
0x27, 0x75, 0x05, 0x34, 0x33, 0x10, 0x20, 0x1a, 0x10, 0x1f, 0x1c, 0x25, 0x53, 0x47, 0x2e, 0x01,
0xed, 0xf3, 0x27, 0x41, 0x2d, 0x19, 0x1c, 0x20, 0x0c, 0x0e, 0x0c, 0x78, 0x7d, 0x68, 0x5d, 0x75,
0x76, 0x71, 0x11, 0x1a, 0x12, 0x09, 0x0a, 0x14, 0x1d, 0x13, 0x69, 0x02, 0x71, 0x2a, 0x3e, 0x2a,
0x14, 0x18, 0x2e, 0x44, 0x2c, 0x02, 0x02, 0x27, 0x29, 0x07, 0x11, 0x1a, 0x12, 0x1d, 0x24, 0x1c,
0x1d, 0x2b, 0x40, 0x32, 0x2c, 0x3f, 0x29, 0x13, 0x1a, 0x31, 0x49, 0x2e, 0x30, 0x30, 0x06, 0x0f,
0x19, 0x13, 0x1e, 0x22, 0x0b, 0x0e, 0x20, 0x2f, 0x43, 0x30, 0xfe, 0x4b, 0x02, 0x52, 0x1d, 0x32,
0x42, 0x25, 0x2c, 0x42, 0x1d, 0x08, 0x0a, 0x08, 0xf1, 0x06, 0xd7, 0xd7, 0x01, 0x3f, 0x11, 0x19,
0x1f, 0x0e, 0x11, 0x20, 0x19, 0x0f, 0xb0, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x12, 0x00, 0x12,
0x00, 0x12, 0x00, 0x32, 0x00, 0x72, 0x00, 0x8e, 0x00, 0xac, 0x00, 0xbe, 0x00, 0xf0, 0x01, 0x14,
0x01, 0x5c, 0x01, 0xb6, 0x02, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0xa2, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x07, 0x00, 0x10, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x2f,
0x00, 0x17, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x12, 0x00, 0x46, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0d, 0x00, 0x58, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x06, 0x00, 0x12, 0x00, 0x65, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09, 0x00, 0x01, 0x00, 0x20,
0x00, 0x77, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x97, 0x00, 0x03,
0x00, 0x01, 0x04, 0x09, 0x00, 0x03, 0x00, 0x5e, 0x00, 0xa5, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
0x00, 0x04, 0x00, 0x24, 0x01, 0x03, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09, 0x00, 0x05, 0x00, 0x1a,
0x01, 0x27, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09, 0x00, 0x06, 0x00, 0x24, 0x01, 0x41, 0x00, 0x03,
0x00, 0x01, 0x04, 0x09, 0x00, 0x11, 0x00, 0x02, 0x01, 0x65, 0x59, 0x75, 0x7a, 0x75, 0x4f, 0x53,
0x53, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x67, 0x75, 0x6c, 0x61,
0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x31, 0x2e, 0x30, 0x30, 0x30, 0x3b, 0x3b,
0x59, 0x75, 0x7a, 0x75, 0x4f, 0x53, 0x53, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
0x2d, 0x52, 0x3b, 0x32, 0x30, 0x31, 0x39, 0x3b, 0x46, 0x4c, 0x56, 0x49, 0x2d, 0x36, 0x31, 0x34,
0x59, 0x75, 0x7a, 0x75, 0x4f, 0x53, 0x53, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
0x20, 0x52, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x31, 0x2e, 0x30, 0x30, 0x30, 0x59,
0x75, 0x7a, 0x75, 0x4f, 0x53, 0x53, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2d,
0x52, 0x00, 0x59, 0x00, 0x75, 0x00, 0x7a, 0x00, 0x75, 0x00, 0x4f, 0x00, 0x53, 0x00, 0x53, 0x00,
0x45, 0x00, 0x78, 0x00, 0x74, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00,
0x6e, 0x00, 0x52, 0x00, 0x65, 0x00, 0x67, 0x00, 0x75, 0x00, 0x6c, 0x00, 0x61, 0x00, 0x72, 0x00,
0x56, 0x00, 0x65, 0x00, 0x72, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x20, 0x00,
0x31, 0x00, 0x2e, 0x00, 0x30, 0x00, 0x30, 0x00, 0x30, 0x00, 0x3b, 0x00, 0x3b, 0x00, 0x59, 0x00,
0x75, 0x00, 0x7a, 0x00, 0x75, 0x00, 0x4f, 0x00, 0x53, 0x00, 0x53, 0x00, 0x45, 0x00, 0x78, 0x00,
0x74, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x2d, 0x00,
0x52, 0x00, 0x3b, 0x00, 0x32, 0x00, 0x30, 0x00, 0x31, 0x00, 0x39, 0x00, 0x3b, 0x00, 0x46, 0x00,
0x4c, 0x00, 0x56, 0x00, 0x49, 0x00, 0x2d, 0x00, 0x36, 0x00, 0x31, 0x00, 0x34, 0x00, 0x59, 0x00,
0x75, 0x00, 0x7a, 0x00, 0x75, 0x00, 0x4f, 0x00, 0x53, 0x00, 0x53, 0x00, 0x45, 0x00, 0x78, 0x00,
0x74, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x20, 0x00,
0x52, 0x00, 0x56, 0x00, 0x65, 0x00, 0x72, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00,
0x20, 0x00, 0x31, 0x00, 0x2e, 0x00, 0x30, 0x00, 0x30, 0x00, 0x30, 0x00, 0x59, 0x00, 0x75, 0x00,
0x7a, 0x00, 0x75, 0x00, 0x4f, 0x00, 0x53, 0x00, 0x53, 0x00, 0x45, 0x00, 0x78, 0x00, 0x74, 0x00,
0x65, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x2d, 0x00, 0x52, 0x00,
0x52, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x9c, 0x00, 0x32,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x01, 0x02, 0x01, 0x03, 0x00, 0x03, 0x01, 0x04,
0x01, 0x05, 0x01, 0x06, 0x01, 0x07, 0x01, 0x08, 0x01, 0x09, 0x01, 0x0a, 0x01, 0x0b, 0x01, 0x0c,
0x01, 0x0d, 0x07, 0x75, 0x6e, 0x69, 0x30, 0x30, 0x30, 0x30, 0x07, 0x75, 0x6e, 0x69, 0x30, 0x30,
0x30, 0x44, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x30, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
0x45, 0x31, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x32, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
0x45, 0x33, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x34, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
0x45, 0x35, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x36, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
0x45, 0x37, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x38, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
0x45, 0x39, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0xff, 0xff, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
}};
} // namespace FileSys::SystemArchive::SharedFontData

View File

@@ -0,0 +1,13 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
namespace FileSys::SystemArchive::SharedFontData {
extern const std::array<unsigned char, 2932> FONT_NINTENDO_EXTENDED;
} // namespace FileSys::SystemArchive::SharedFontData

File diff suppressed because it is too large

View File

@@ -0,0 +1,13 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
namespace FileSys::SystemArchive::SharedFontData {
extern const std::array<unsigned char, 217276> FONT_STANDARD;
} // namespace FileSys::SystemArchive::SharedFontData

View File

@@ -0,0 +1,78 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/file_sys/system_archive/data/font_chinese_simplified.h"
#include "core/file_sys/system_archive/data/font_chinese_traditional.h"
#include "core/file_sys/system_archive/data/font_extended_chinese_simplified.h"
#include "core/file_sys/system_archive/data/font_korean.h"
#include "core/file_sys/system_archive/data/font_nintendo_extended.h"
#include "core/file_sys/system_archive/data/font_standard.h"
#include "core/file_sys/system_archive/shared_font.h"
#include "core/file_sys/vfs_vector.h"
#include "core/hle/service/ns/pl_u.h"
namespace FileSys::SystemArchive {
namespace {
template <std::size_t Size>
VirtualFile PackBFTTF(const std::array<u8, Size>& data, const std::string& name) {
std::vector<u32> vec(Size / sizeof(u32));
std::memcpy(vec.data(), data.data(), vec.size() * sizeof(u32));
std::vector<u8> bfttf(Size + sizeof(u64));
u64 offset = 0;
Service::NS::EncryptSharedFont(vec, bfttf, offset);
return std::make_shared<VectorVfsFile>(std::move(bfttf), name);
}
} // Anonymous namespace
VirtualDir FontNintendoExtension() {
return std::make_shared<VectorVfsDirectory>(
std::vector<VirtualFile>{
PackBFTTF(SharedFontData::FONT_NINTENDO_EXTENDED, "nintendo_ext_003.bfttf"),
PackBFTTF(SharedFontData::FONT_NINTENDO_EXTENDED, "nintendo_ext2_003.bfttf"),
},
std::vector<VirtualDir>{});
}
VirtualDir FontStandard() {
return std::make_shared<VectorVfsDirectory>(
std::vector<VirtualFile>{
PackBFTTF(SharedFontData::FONT_STANDARD, "nintendo_udsg-r_std_003.bfttf"),
},
std::vector<VirtualDir>{});
}
VirtualDir FontKorean() {
return std::make_shared<VectorVfsDirectory>(
std::vector<VirtualFile>{
PackBFTTF(SharedFontData::FONT_KOREAN, "nintendo_udsg-r_ko_003.bfttf"),
},
std::vector<VirtualDir>{});
}
VirtualDir FontChineseTraditional() {
return std::make_shared<VectorVfsDirectory>(
std::vector<VirtualFile>{
PackBFTTF(SharedFontData::FONT_CHINESE_TRADITIONAL,
"nintendo_udjxh-db_zh-tw_003.bfttf"),
},
std::vector<VirtualDir>{});
}
VirtualDir FontChineseSimple() {
return std::make_shared<VectorVfsDirectory>(
std::vector<VirtualFile>{
PackBFTTF(SharedFontData::FONT_CHINESE_SIMPLIFIED,
"nintendo_udsg-r_org_zh-cn_003.bfttf"),
PackBFTTF(SharedFontData::FONT_EXTENDED_CHINESE_SIMPLIFIED,
"nintendo_udsg-r_ext_zh-cn_003.bfttf"),
},
std::vector<VirtualDir>{});
}
} // namespace FileSys::SystemArchive
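PackBFTTF copies the raw font into a u32 vector, reserves an extra sizeof(u64) in the output buffer, and hands both to Service::NS::EncryptSharedFont, so each directory above ends up holding ready-to-serve .bfttf files backed entirely by memory. A hedged sketch of reading one back out, using the VfsFile/VfsDirectory interface seen elsewhere in this compare:

#include <vector>
#include "core/file_sys/system_archive/shared_font.h"

void SharedFontSketch() {
    const FileSys::VirtualDir dir = FileSys::SystemArchive::FontStandard();
    const FileSys::VirtualFile font = dir->GetFile("nintendo_udsg-r_std_003.bfttf");
    if (font == nullptr) {
        return;
    }
    std::vector<u8> bfttf(font->GetSize());
    font->Read(bfttf.data(), bfttf.size()); // in-memory BFTTF, no host file involved
}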

View File

@@ -0,0 +1,17 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/file_sys/vfs_types.h"
namespace FileSys::SystemArchive {
VirtualDir FontNintendoExtension();
VirtualDir FontStandard();
VirtualDir FontKorean();
VirtualDir FontChineseTraditional();
VirtualDir FontChineseSimple();
} // namespace FileSys::SystemArchive

View File

@@ -6,6 +6,7 @@
#include "core/file_sys/romfs.h"
#include "core/file_sys/system_archive/mii_model.h"
#include "core/file_sys/system_archive/ng_word.h"
#include "core/file_sys/system_archive/shared_font.h"
#include "core/file_sys/system_archive/system_archive.h"
#include "core/file_sys/system_archive/system_version.h"
@@ -39,11 +40,11 @@ constexpr std::array<SystemArchiveDescriptor, SYSTEM_ARCHIVE_COUNT> SYSTEM_ARCHI
{0x010000000000080D, "UrlBlackList", nullptr},
{0x010000000000080E, "TimeZoneBinary", nullptr},
{0x010000000000080F, "CertStoreCruiser", nullptr},
{0x0100000000000810, "FontNintendoExtension", nullptr},
{0x0100000000000811, "FontStandard", nullptr},
{0x0100000000000812, "FontKorean", nullptr},
{0x0100000000000813, "FontChineseTraditional", nullptr},
{0x0100000000000814, "FontChineseSimple", nullptr},
{0x0100000000000810, "FontNintendoExtension", &FontNintendoExtension},
{0x0100000000000811, "FontStandard", &FontStandard},
{0x0100000000000812, "FontKorean", &FontKorean},
{0x0100000000000813, "FontChineseTraditional", &FontChineseTraditional},
{0x0100000000000814, "FontChineseSimple", &FontChineseSimple},
{0x0100000000000815, "FontBfcpx", nullptr},
{0x0100000000000816, "SystemUpdate", nullptr},
{0x0100000000000817, "0100000000000817", nullptr},

View File

@@ -202,13 +202,11 @@ void RegisterModule(std::string name, VAddr beg, VAddr end, bool add_elf_ext) {
}
static Kernel::Thread* FindThreadById(s64 id) {
for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
const auto& threads = Core::System::GetInstance().Scheduler(core).GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadID() == static_cast<u64>(id)) {
current_core = core;
return thread.get();
}
const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList();
for (auto& thread : threads) {
if (thread->GetThreadID() == static_cast<u64>(id)) {
current_core = thread->GetProcessorID();
return thread.get();
}
}
return nullptr;
@@ -647,11 +645,9 @@ static void HandleQuery() {
SendReply(buffer.c_str());
} else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) {
std::string val = "m";
for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
const auto& threads = Core::System::GetInstance().Scheduler(core).GetThreadList();
for (const auto& thread : threads) {
val += fmt::format("{:x},", thread->GetThreadID());
}
const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList();
for (const auto& thread : threads) {
val += fmt::format("{:x},", thread->GetThreadID());
}
val.pop_back();
SendReply(val.c_str());
@@ -661,13 +657,11 @@ static void HandleQuery() {
std::string buffer;
buffer += "l<?xml version=\"1.0\"?>";
buffer += "<threads>";
for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
const auto& threads = Core::System::GetInstance().Scheduler(core).GetThreadList();
for (const auto& thread : threads) {
buffer +=
fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*",
thread->GetThreadID(), core, thread->GetThreadID());
}
const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList();
for (const auto& thread : threads) {
buffer +=
fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*",
thread->GetThreadID(), thread->GetProcessorID(), thread->GetThreadID());
}
buffer += "</threads>";
SendReply(buffer.c_str());

View File

@@ -22,6 +22,7 @@ namespace Kernel {
namespace {
// Wake up num_to_wake (or all) threads in a vector.
void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
auto& system = Core::System::GetInstance();
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
// them all.
std::size_t last = waiting_threads.size();
@@ -35,6 +36,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
waiting_threads[i]->SetArbiterWaitAddress(0);
waiting_threads[i]->ResumeFromWait();
system.PrepareReschedule(waiting_threads[i]->GetProcessorID());
}
}
} // Anonymous namespace
@@ -89,12 +91,20 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
// Determine the modified value depending on the waiting count.
s32 updated_value;
if (waiting_threads.empty()) {
updated_value = value + 1;
} else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
updated_value = value - 1;
if (num_to_wake <= 0) {
if (waiting_threads.empty()) {
updated_value = value + 1;
} else {
updated_value = value - 1;
}
} else {
updated_value = value;
if (waiting_threads.empty()) {
updated_value = value + 1;
} else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
updated_value = value - 1;
} else {
updated_value = value;
}
}
if (static_cast<s32>(Memory::Read32(address)) != value) {
@@ -169,30 +179,22 @@ ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) {
current_thread->WakeAfterDelay(timeout);
system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
system.PrepareReschedule(current_thread->GetProcessorID());
return RESULT_TIMEOUT;
}
std::vector<SharedPtr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) const {
const auto RetrieveWaitingThreads = [this](std::size_t core_index,
std::vector<SharedPtr<Thread>>& waiting_threads,
VAddr arb_addr) {
const auto& scheduler = system.Scheduler(core_index);
const auto& thread_list = scheduler.GetThreadList();
for (const auto& thread : thread_list) {
if (thread->GetArbiterWaitAddress() == arb_addr) {
waiting_threads.push_back(thread);
}
}
};
// Retrieve all threads that are waiting for this address.
std::vector<SharedPtr<Thread>> threads;
RetrieveWaitingThreads(0, threads, address);
RetrieveWaitingThreads(1, threads, address);
RetrieveWaitingThreads(2, threads, address);
RetrieveWaitingThreads(3, threads, address);
const auto& scheduler = system.GlobalScheduler();
const auto& thread_list = scheduler.GetThreadList();
for (const auto& thread : thread_list) {
if (thread->GetArbiterWaitAddress() == address) {
threads.push_back(thread);
}
}
// Sort them by priority, such that the highest priority ones come first.
std::sort(threads.begin(), threads.end(),

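The restructured branch separates the wake-all case (num_to_wake <= 0) from the counted case, but the value written back follows the same rule as before: +1 when nobody waits, -1 when every waiter will be woken, unchanged otherwise. A condensed restatement of that rule (illustrative helper, not part of the diff):

// Illustrative only: mirrors the branch structure above in one place.
s32 UpdatedArbiterValue(s32 value, std::size_t num_waiters, s32 num_to_wake) {
    if (num_waiters == 0) {
        return value + 1; // no waiters at all
    }
    const bool wakes_everyone =
        num_to_wake <= 0 || num_waiters <= static_cast<std::size_t>(num_to_wake);
    return wakes_everyone ? value - 1 : value; // partially woken: leave the value untouched
}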
View File

@@ -58,8 +58,7 @@ SharedPtr<WritableEvent> HLERequestContext::SleepClientThread(
auto& kernel = Core::System::GetInstance().Kernel();
if (!writable_event) {
// Create event if not provided
const auto pair = WritableEvent::CreateEventPair(kernel, ResetType::Automatic,
"HLE Pause Event: " + reason);
const auto pair = WritableEvent::CreateEventPair(kernel, "HLE Pause Event: " + reason);
writable_event = pair.writable;
}

View File

@@ -12,12 +12,15 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/lock.h"
#include "core/hle/result.h"
@@ -58,12 +61,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
if (thread->HasWakeupCallback()) {
resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
}
}
if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
thread->GetWaitHandle() != 0) {
ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
thread->GetStatus() == ThreadStatus::WaitCondVar);
} else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
thread->GetStatus() == ThreadStatus::WaitCondVar) {
thread->SetMutexWaitAddress(0);
thread->SetCondVarWaitAddress(0);
thread->SetWaitHandle(0);
@@ -83,18 +82,23 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
}
if (resume) {
if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
thread->GetStatus() == ThreadStatus::WaitArb) {
thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
}
thread->ResumeFromWait();
}
}
struct KernelCore::Impl {
explicit Impl(Core::System& system) : system{system} {}
explicit Impl(Core::System& system) : system{system}, global_scheduler{system} {}
void Initialize(KernelCore& kernel) {
Shutdown();
InitializeSystemResourceLimit(kernel);
InitializeThreads();
InitializePreemption();
}
void Shutdown() {
@@ -110,6 +114,9 @@ struct KernelCore::Impl {
thread_wakeup_callback_handle_table.Clear();
thread_wakeup_event_type = nullptr;
preemption_event = nullptr;
global_scheduler.Shutdown();
named_ports.clear();
}
@@ -132,6 +139,18 @@ struct KernelCore::Impl {
system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
}
void InitializePreemption() {
preemption_event = system.CoreTiming().RegisterEvent(
"PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
global_scheduler.PreemptThreads();
s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
});
s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
}
std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
@@ -140,10 +159,12 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<SharedPtr<Process>> process_list;
Process* current_process = nullptr;
Kernel::GlobalScheduler global_scheduler;
SharedPtr<ResourceLimit> system_resource_limit;
Core::Timing::EventType* thread_wakeup_event_type = nullptr;
Core::Timing::EventType* preemption_event = nullptr;
// TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
// allowing us to simply use a pool index or similar.
Kernel::HandleTable thread_wakeup_callback_handle_table;
@@ -203,6 +224,14 @@ const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const {
return impl->process_list;
}
Kernel::GlobalScheduler& KernelCore::GlobalScheduler() {
return impl->global_scheduler;
}
const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
return impl->global_scheduler;
}
void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
impl->named_ports.emplace(std::move(name), std::move(port));
}

View File

@@ -21,6 +21,7 @@ namespace Kernel {
class AddressArbiter;
class ClientPort;
class GlobalScheduler;
class HandleTable;
class Process;
class ResourceLimit;
@@ -75,6 +76,12 @@ public:
/// Retrieves the list of processes.
const std::vector<SharedPtr<Process>>& GetProcessList() const;
/// Gets the sole instance of the global scheduler
Kernel::GlobalScheduler& GlobalScheduler();
/// Gets the sole instance of the global scheduler
const Kernel::GlobalScheduler& GlobalScheduler() const;
/// Adds a port to the named port table
void AddNamedPort(std::string name, SharedPtr<ClientPort> port);

View File

@@ -7,6 +7,7 @@
#include "common/assert.h"
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/kernel.h"
@@ -78,7 +79,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
// thread.
ASSERT(requesting_thread == current_thread);
const u32 addr_value = Memory::Read32(address);
u32 addr_value = Memory::Read32(address);
// If the mutex isn't being held, just return success.
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
@@ -89,6 +90,20 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
return ERR_INVALID_HANDLE;
}
// This is a workaround where an unknown bug writes the mutex value to give ownership to a cond var
// waiting thread.
if (holding_thread->GetStatus() == ThreadStatus::WaitCondVar) {
if (holding_thread->GetMutexWaitAddress() == address) {
Release(address, holding_thread.get());
addr_value = Memory::Read32(address);
if (addr_value == 0)
return RESULT_SUCCESS;
else {
holding_thread = handle_table.Get<Thread>(addr_value & Mutex::MutexOwnerMask);
}
}
}
// Wait until the mutex is released
current_thread->SetMutexWaitAddress(address);
current_thread->SetWaitHandle(requesting_thread_handle);
@@ -104,14 +119,13 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
return RESULT_SUCCESS;
}
ResultCode Mutex::Release(VAddr address) {
ResultCode Mutex::Release(VAddr address, Thread* holding_thread) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ERR_INVALID_ADDRESS;
}
auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(holding_thread, address);
// There are no more threads waiting for the mutex, release it completely.
if (thread == nullptr) {
@@ -120,7 +134,7 @@ ResultCode Mutex::Release(VAddr address) {
}
// Transfer the ownership of the mutex from the previous owner to the new one.
TransferMutexOwnership(address, current_thread, thread);
TransferMutexOwnership(address, holding_thread, thread);
u32 mutex_value = thread->GetWaitHandle();
@@ -139,6 +153,12 @@ ResultCode Mutex::Release(VAddr address) {
thread->SetCondVarWaitAddress(0);
thread->SetMutexWaitAddress(0);
thread->SetWaitHandle(0);
thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
if (thread->GetProcessorID() >= 0)
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
if (holding_thread->GetProcessorID() >= 0)
system.CpuCore(holding_thread->GetProcessorID()).PrepareReschedule();
return RESULT_SUCCESS;
}

View File

@@ -29,7 +29,7 @@ public:
Handle requesting_thread_handle);
/// Releases the mutex at the specified address.
ResultCode Release(VAddr address);
ResultCode Release(VAddr address, Thread* holding_thread);
private:
Core::System& system;

View File

@@ -32,11 +32,6 @@ enum class HandleType : u32 {
ServerSession,
};
enum class ResetType {
Automatic, ///< Reset automatically on object acquisition
Manual, ///< Never reset automatically
};
class Object : NonCopyable {
public:
explicit Object(KernelCore& kernel);

View File

@@ -213,10 +213,7 @@ void Process::PrepareForTermination() {
}
};
stop_threads(system.Scheduler(0).GetThreadList());
stop_threads(system.Scheduler(1).GetThreadList());
stop_threads(system.Scheduler(2).GetThreadList());
stop_threads(system.Scheduler(3).GetThreadList());
stop_threads(system.GlobalScheduler().GetThreadList());
FreeTLSRegion(tls_region_address);
tls_region_address = 0;

View File

@@ -20,15 +20,13 @@ bool ReadableEvent::ShouldWait(const Thread* thread) const {
void ReadableEvent::Acquire(Thread* thread) {
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
if (reset_type == ResetType::Automatic) {
signaled = false;
}
}
void ReadableEvent::Signal() {
signaled = true;
WakeupAllWaitingThreads();
if (!signaled) {
signaled = true;
WakeupAllWaitingThreads();
};
}
void ReadableEvent::Clear() {

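The Signal() change makes signalling idempotent: once signaled is set, a second Signal() before Clear() no longer re-runs WakeupAllWaitingThreads(). A tiny hedged sketch, mirroring the CreateEventPair call shown in the hle_ipc diff above ('kernel' is assumed to be a KernelCore reference):

const auto pair = Kernel::WritableEvent::CreateEventPair(kernel, "ExampleEvent");
pair.writable->Signal(); // sets signaled and wakes any waiters
pair.writable->Signal(); // already signaled: no second wake-up pass
pair.writable->Clear();  // back to the unsignaled state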
View File

@@ -27,10 +27,6 @@ public:
return name;
}
ResetType GetResetType() const {
return reset_type;
}
static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent;
HandleType GetHandleType() const override {
return HANDLE_TYPE;
@@ -55,8 +51,7 @@ private:
void Signal();
ResetType reset_type;
bool signaled;
bool signaled{};
std::string name; ///< Name of event (optional)
};

View File

@@ -1,8 +1,13 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
//
// SelectThreads, Yield functions originally by TuxSH.
// licensed under GPLv2 or later under exception provided by the author.
#include <algorithm>
#include <set>
#include <unordered_set>
#include <utility>
#include "common/assert.h"
@@ -17,56 +22,374 @@
namespace Kernel {
std::mutex Scheduler::scheduler_mutex;
GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {}
Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core)
: cpu_core{cpu_core}, system{system} {}
GlobalScheduler::~GlobalScheduler() = default;
Scheduler::~Scheduler() {
for (auto& thread : thread_list) {
thread->Stop();
void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
thread_list.push_back(std::move(thread));
}
void GlobalScheduler::RemoveThread(const Thread* thread) {
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
}
void GlobalScheduler::UnloadThread(s32 core) {
Scheduler& sched = system.Scheduler(core);
sched.UnloadThread();
}
void GlobalScheduler::SelectThread(u32 core) {
const auto update_thread = [](Thread* thread, Scheduler& sched) {
if (thread != sched.selected_thread) {
if (thread == nullptr) {
++sched.idle_selection_count;
}
sched.selected_thread = thread;
}
sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
std::atomic_thread_fence(std::memory_order_seq_cst);
};
Scheduler& sched = system.Scheduler(core);
Thread* current_thread = nullptr;
// Step 1: Get top thread in schedule queue.
current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
if (current_thread) {
update_thread(current_thread, sched);
return;
}
// Step 2: Try selecting a suggested thread.
Thread* winner = nullptr;
std::set<s32> sug_cores;
for (auto thread : suggested_queue[core]) {
s32 this_core = thread->GetProcessorID();
Thread* thread_on_core = nullptr;
if (this_core >= 0) {
thread_on_core = scheduled_queue[this_core].front();
}
if (this_core < 0 || thread != thread_on_core) {
winner = thread;
break;
}
sug_cores.insert(this_core);
}
// If we got a suggested thread, select it; otherwise do a second pass.
if (winner && winner->GetPriority() > 2) {
if (winner->IsRunning()) {
UnloadThread(winner->GetProcessorID());
}
TransferToCore(winner->GetPriority(), core, winner);
update_thread(winner, sched);
return;
}
// Step 3: Select a suggested thread from another core
for (auto& src_core : sug_cores) {
auto it = scheduled_queue[src_core].begin();
it++;
if (it != scheduled_queue[src_core].end()) {
Thread* thread_on_core = scheduled_queue[src_core].front();
Thread* to_change = *it;
if (thread_on_core->IsRunning() || to_change->IsRunning()) {
UnloadThread(src_core);
}
TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
current_thread = thread_on_core;
break;
}
}
update_thread(current_thread, sched);
}
bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
// Note: caller should use critical section, etc.
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
const u32 priority = yielding_thread->GetPriority();
// Yield the thread
const Thread* const winner = scheduled_queue[core_id].front(priority);
ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front");
scheduled_queue[core_id].yield(priority);
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
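YieldThread pops the caller off the front of its priority level on its own core and re-queues it at the back, then asks for reselection. The front(priority)/yield(priority) pair behaves like a per-priority round-robin rotation; a stand-alone illustration with a plain std::deque (not the scheduler's actual queue type):

#include <deque>

// Illustrative rotation only: the real scheduler keeps one such list per priority per core.
void YieldFront(std::deque<int>& threads_at_this_priority) {
    if (threads_at_this_priority.size() < 2) {
        return; // nothing to rotate past
    }
    threads_at_this_priority.push_back(threads_at_this_priority.front());
    threads_at_this_priority.pop_front(); // the former front now runs last among equals
}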
bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
// etc.
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
const u32 priority = yielding_thread->GetPriority();
// Yield the thread
ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
"Thread yielding without being in front");
scheduled_queue[core_id].yield(priority);
std::array<Thread*, NUM_CPU_CORES> current_threads;
for (u32 i = 0; i < NUM_CPU_CORES; i++) {
current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
}
Thread* next_thread = scheduled_queue[core_id].front(priority);
Thread* winner = nullptr;
for (auto& thread : suggested_queue[core_id]) {
const s32 source_core = thread->GetProcessorID();
if (source_core >= 0) {
if (current_threads[source_core] != nullptr) {
if (thread == current_threads[source_core] ||
current_threads[source_core]->GetPriority() < min_regular_priority) {
continue;
}
}
}
if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
next_thread->GetPriority() < thread->GetPriority()) {
if (thread->GetPriority() <= priority) {
winner = thread;
break;
}
}
}
if (winner != nullptr) {
if (winner != yielding_thread) {
if (winner->IsRunning()) {
UnloadThread(winner->GetProcessorID());
}
TransferToCore(winner->GetPriority(), core_id, winner);
}
} else {
winner = next_thread;
}
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
// etc.
Thread* winner = nullptr;
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
// Remove the thread from its scheduled multilevel queue and put it on the corresponding "suggested" one instead
TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
// If the core is idle, perform load balancing, excluding the threads that have just used this
// function...
if (scheduled_queue[core_id].empty()) {
// Here, "current_threads" is calculated after the ""yield"", unlike yield -1
std::array<Thread*, NUM_CPU_CORES> current_threads;
for (u32 i = 0; i < NUM_CPU_CORES; i++) {
current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
}
for (auto& thread : suggested_queue[core_id]) {
const s32 source_core = thread->GetProcessorID();
if (source_core < 0 || thread == current_threads[source_core]) {
continue;
}
if (current_threads[source_core] == nullptr ||
current_threads[source_core]->GetPriority() >= min_regular_priority) {
winner = thread;
}
break;
}
if (winner != nullptr) {
if (winner != yielding_thread) {
if (winner->IsRunning()) {
UnloadThread(winner->GetProcessorID());
}
TransferToCore(winner->GetPriority(), core_id, winner);
}
} else {
winner = yielding_thread;
}
}
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}
void GlobalScheduler::PreemptThreads() {
for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
if (scheduled_queue[core_id].size(priority) > 0) {
scheduled_queue[core_id].front(priority)->IncrementYieldCount();
scheduled_queue[core_id].yield(priority);
if (scheduled_queue[core_id].size(priority) > 1) {
scheduled_queue[core_id].front(priority)->IncrementYieldCount();
}
}
Thread* current_thread =
scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
Thread* winner = nullptr;
for (auto& thread : suggested_queue[core_id]) {
const s32 source_core = thread->GetProcessorID();
if (thread->GetPriority() != priority) {
continue;
}
if (source_core >= 0) {
Thread* next_thread = scheduled_queue[source_core].empty()
? nullptr
: scheduled_queue[source_core].front();
if (next_thread != nullptr && next_thread->GetPriority() < 2) {
break;
}
if (next_thread == thread) {
continue;
}
}
if (current_thread != nullptr &&
current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
winner = thread;
break;
}
}
if (winner != nullptr) {
if (winner->IsRunning()) {
UnloadThread(winner->GetProcessorID());
}
TransferToCore(winner->GetPriority(), s32(core_id), winner);
current_thread =
winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
}
if (current_thread != nullptr && current_thread->GetPriority() > priority) {
for (auto& thread : suggested_queue[core_id]) {
const s32 source_core = thread->GetProcessorID();
if (thread->GetPriority() < priority) {
continue;
}
if (source_core >= 0) {
Thread* next_thread = scheduled_queue[source_core].empty()
? nullptr
: scheduled_queue[source_core].front();
if (next_thread != nullptr && next_thread->GetPriority() < 2) {
break;
}
if (next_thread == thread) {
continue;
}
}
if (current_thread != nullptr &&
current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
winner = thread;
break;
}
}
if (winner != nullptr) {
if (winner->IsRunning()) {
UnloadThread(winner->GetProcessorID());
}
TransferToCore(winner->GetPriority(), s32(core_id), winner);
current_thread = winner;
}
}
is_reselection_pending.store(true, std::memory_order_release);
}
}
void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) {
suggested_queue[core].add(thread, priority);
}
void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) {
suggested_queue[core].remove(thread, priority);
}
void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
scheduled_queue[core].add(thread, priority);
}
void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
scheduled_queue[core].add(thread, priority, false);
}
void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) {
scheduled_queue[core].remove(thread, priority);
scheduled_queue[core].add(thread, priority);
}
void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) {
scheduled_queue[core].remove(thread, priority);
}
void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
const s32 source_core = thread->GetProcessorID();
if (source_core == destination_core || !schedulable) {
return;
}
thread->SetProcessorID(destination_core);
if (source_core >= 0) {
Unschedule(priority, source_core, thread);
}
if (destination_core >= 0) {
Unsuggest(priority, destination_core, thread);
Schedule(priority, destination_core, thread);
}
if (source_core >= 0) {
Suggest(priority, source_core, thread);
}
}
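// Illustrative example of the queue movements above, assuming a thread T currently
// scheduled on core 0 at priority 32 being transferred to core 2 (names and numbers
// are hypothetical):
//
//     TransferToCore(32, 2, T);
//     // before: T in scheduled_queue[0] (and possibly in suggested_queue[2])
//     // after:  T in scheduled_queue[2], T in suggested_queue[0]
//
// With destination_core == -1, T is only removed from its scheduled queue and
// re-added to the suggested queue of its former core.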
bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
const Thread* winner) {
if (current_thread == winner) {
current_thread->IncrementYieldCount();
return true;
} else {
is_reselection_pending.store(true, std::memory_order_release);
return false;
}
}
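// Usage note (illustrative): the boolean result propagates back through
// Thread::YieldSimple()/YieldAndBalanceLoad()/YieldAndWaitForLoadBalancing() to the
// SleepThread SVC, which treats a redundant yield as a sign the core is idling and
// advances core timing to avoid costly continuous calls (see the SleepThread changes
// further below).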
void GlobalScheduler::Shutdown() {
for (std::size_t core = 0; core < NUM_CPU_CORES; core++) {
scheduled_queue[core].clear();
suggested_queue[core].clear();
}
thread_list.clear();
}
Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id)
: system(system), cpu_core(cpu_core), core_id(core_id) {}
Scheduler::~Scheduler() = default;
bool Scheduler::HaveReadyThreads() const {
std::lock_guard lock{scheduler_mutex};
return !ready_queue.empty();
return system.GlobalScheduler().HaveReadyThreads(core_id);
}
Thread* Scheduler::GetCurrentThread() const {
return current_thread.get();
}
Thread* Scheduler::GetSelectedThread() const {
return selected_thread.get();
}
void Scheduler::SelectThreads() {
system.GlobalScheduler().SelectThread(core_id);
}
u64 Scheduler::GetLastContextSwitchTicks() const {
return last_context_switch_time;
}
Thread* Scheduler::PopNextReadyThread() {
Thread* next = nullptr;
Thread* thread = GetCurrentThread();
if (thread && thread->GetStatus() == ThreadStatus::Running) {
if (ready_queue.empty()) {
return thread;
}
// We have to do better than the current thread.
// This call returns null when that's not possible.
next = ready_queue.front();
if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
next = thread;
}
} else {
if (ready_queue.empty()) {
return nullptr;
}
next = ready_queue.front();
void Scheduler::TryDoContextSwitch() {
if (is_context_switch_pending) {
SwitchContext();
}
return next;
}
void Scheduler::SwitchContext(Thread* new_thread) {
Thread* previous_thread = GetCurrentThread();
void Scheduler::UnloadThread() {
Thread* const previous_thread = GetCurrentThread();
Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -80,23 +403,52 @@ void Scheduler::SwitchContext(Thread* new_thread) {
if (previous_thread->GetStatus() == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
previous_thread->SetStatus(ThreadStatus::Ready);
}
previous_thread->SetIsRunning(false);
}
current_thread = nullptr;
}
void Scheduler::SwitchContext() {
Thread* const previous_thread = GetCurrentThread();
Thread* const new_thread = GetSelectedThread();
is_context_switch_pending = false;
if (new_thread == previous_thread) {
return;
}
Process* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
// Save context for previous thread
if (previous_thread) {
cpu_core.SaveContext(previous_thread->GetContext());
// Save the TPIDR_EL0 system register in case it was modified.
previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
if (previous_thread->GetStatus() == ThreadStatus::Running) {
// This is only the case when a reschedule is triggered without the current thread
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
previous_thread->SetStatus(ThreadStatus::Ready);
}
previous_thread->SetIsRunning(false);
}
// Load context of new thread
if (new_thread) {
ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
"Thread must be assigned to this core.");
ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
"Thread must be ready to become running.");
// Cancel any outstanding wakeup events for this thread
new_thread->CancelWakeupTimer();
current_thread = new_thread;
ready_queue.remove(new_thread, new_thread->GetPriority());
new_thread->SetStatus(ThreadStatus::Running);
new_thread->SetIsRunning(true);
auto* const thread_owner_process = current_thread->GetOwnerProcess();
if (previous_process != thread_owner_process) {
@@ -130,124 +482,9 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
last_context_switch_time = most_recent_switch_ticks;
}
void Scheduler::Reschedule() {
std::lock_guard lock{scheduler_mutex};
Thread* cur = GetCurrentThread();
Thread* next = PopNextReadyThread();
if (cur && next) {
LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
} else if (cur) {
LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
} else if (next) {
LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
}
SwitchContext(next);
}
void Scheduler::AddThread(SharedPtr<Thread> thread) {
std::lock_guard lock{scheduler_mutex};
thread_list.push_back(std::move(thread));
}
void Scheduler::RemoveThread(Thread* thread) {
std::lock_guard lock{scheduler_mutex};
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
thread_list.end());
}
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
std::lock_guard lock{scheduler_mutex};
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
ready_queue.add(thread, priority);
}
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
std::lock_guard lock{scheduler_mutex};
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
ready_queue.remove(thread, priority);
}
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
std::lock_guard lock{scheduler_mutex};
if (thread->GetPriority() == priority) {
return;
}
// If thread was ready, adjust queues
if (thread->GetStatus() == ThreadStatus::Ready)
ready_queue.adjust(thread, thread->GetPriority(), priority);
}
Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
std::lock_guard lock{scheduler_mutex};
const u32 mask = 1U << core;
for (auto* thread : ready_queue) {
if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
return thread;
}
}
return nullptr;
}
void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
ASSERT(thread != nullptr);
// Avoid yielding if the thread isn't even running.
ASSERT(thread->GetStatus() == ThreadStatus::Running);
// Sanity check that the priority is valid
ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
// Yield this thread -- sleep for zero time and force reschedule to different thread
GetCurrentThread()->Sleep(0);
}
void Scheduler::YieldWithLoadBalancing(Thread* thread) {
ASSERT(thread != nullptr);
const auto priority = thread->GetPriority();
const auto core = static_cast<u32>(thread->GetProcessorID());
// Avoid yielding if the thread isn't even running.
ASSERT(thread->GetStatus() == ThreadStatus::Running);
// Sanity check that the priority is valid
ASSERT(priority < THREADPRIO_COUNT);
// Sleep for zero time to be able to force reschedule to different thread
GetCurrentThread()->Sleep(0);
Thread* suggested_thread = nullptr;
// Search through all of the cpu cores (except this one) for a suggested thread.
// Take the first non-nullptr one
for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
const auto res =
system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority);
// If scheduler provides a suggested thread
if (res != nullptr) {
// And it's better than the current suggested thread (or is the first valid one)
if (suggested_thread == nullptr ||
suggested_thread->GetPriority() > res->GetPriority()) {
suggested_thread = res;
}
}
}
// If a suggested thread was found, queue that for this core
if (suggested_thread != nullptr)
suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
}
void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
void Scheduler::Shutdown() {
current_thread = nullptr;
selected_thread = nullptr;
}
} // namespace Kernel

View File

@@ -20,124 +20,185 @@ namespace Kernel {
class Process;
class Scheduler final {
class GlobalScheduler final {
public:
explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core);
~Scheduler();
static constexpr u32 NUM_CPU_CORES = 4;
/// Returns whether there are any threads that are ready to run.
bool HaveReadyThreads() const;
/// Reschedules to the next available thread (call after current thread is suspended)
void Reschedule();
/// Gets the current running thread
Thread* GetCurrentThread() const;
/// Gets the timestamp for the last context switch in ticks.
u64 GetLastContextSwitchTicks() const;
explicit GlobalScheduler(Core::System& system);
~GlobalScheduler();
/// Adds a new thread to the scheduler
void AddThread(SharedPtr<Thread> thread);
/// Removes a thread from the scheduler
void RemoveThread(Thread* thread);
/// Schedules a thread that has become "ready"
void ScheduleThread(Thread* thread, u32 priority);
/// Unschedules a thread that was already scheduled
void UnscheduleThread(Thread* thread, u32 priority);
/// Sets the priority of a thread in the scheduler
void SetThreadPriority(Thread* thread, u32 priority);
/// Gets the next suggested thread for load balancing
Thread* GetNextSuggestedThread(u32 core, u32 minimum_priority) const;
/**
* YieldWithoutLoadBalancing -- analogous to normal yield on a system
* Moves the thread to the end of the ready queue for its priority, and then reschedules the
* system to the new head of the queue.
*
* Example (Single Core -- but can be extrapolated to multi):
* ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->)
* Currently Running: ThreadR
*
* ThreadR calls YieldWithoutLoadBalancing
*
* ThreadR is moved to the end of ready_queue[prio=0]:
* ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->)
* Currently Running: Nothing
*
* System is rescheduled (ThreadA is popped off of queue):
* ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->)
* Currently Running: ThreadA
*
* If the queue is empty at time of call, no yielding occurs. This does not cross between cores
* or priorities at all.
*/
void YieldWithoutLoadBalancing(Thread* thread);
/**
* YieldWithLoadBalancing -- yield but with better selection of the new running thread
* Moves the current thread to the end of the ready queue for its priority, then selects a
* 'suggested thread' (a thread on a different core that could run on this core) from the
* scheduler, changes its core, and reschedules the current core to that thread.
*
* Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were
* single core):
* ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant)
* ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
* Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
*
* ThreadQ calls YieldWithLoadBalancing
*
* ThreadQ is moved to the end of ready_queue[core=0][prio=0]:
* ready_queue[core=0][prio=0]: ThreadA, ThreadB
* ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
* Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
*
* A list of suggested threads for each core is compiled
* Suggested Threads: {ThreadC on Core 1}
* If this were quad core (as the Switch is), there could be between 0 and 3 threads in this
* list. If there is more than one, the thread with the highest priority is selected.
*
* ThreadC is core changed to Core 0:
* ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ
* ready_queue[core=1][prio=0]: ThreadD
* Currently Running: None on Core 0 || ThreadP on Core 1
*
* System is rescheduled (ThreadC is popped off of queue):
* ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ
* ready_queue[core=1][prio=0]: ThreadD
* Currently Running: ThreadC on Core 0 || ThreadP on Core 1
*
* If no suggested threads can be found, this behaves just like a normal yield. If there are
* multiple candidates for the suggested thread on a core, the one with the highest priority
* is taken.
*/
void YieldWithLoadBalancing(Thread* thread);
/// Currently unknown -- asserts as unimplemented on call
void YieldAndWaitForLoadBalancing(Thread* thread);
void RemoveThread(const Thread* thread);
/// Returns a list of all threads managed by the scheduler
const std::vector<SharedPtr<Thread>>& GetThreadList() const {
return thread_list;
}
private:
/**
* Pops and returns the next thread from the thread queue
* @return A pointer to the next ready thread
* Add a thread to the suggested queue of a cpu core. Suggested threads may be
* picked if no thread is scheduled to run on the core.
*/
Thread* PopNextReadyThread();
void Suggest(u32 priority, u32 core, Thread* thread);
/**
* Switches the CPU's active thread context to that of the specified thread
* @param new_thread The thread to switch to
* Remove a thread from the suggested queue of a cpu core. Suggested threads may be
* picked if no thread is scheduled to run on the core.
*/
void SwitchContext(Thread* new_thread);
void Unsuggest(u32 priority, u32 core, Thread* thread);
/**
* Add a thread to the scheduling queue of a cpu core. The thread is added at the
* back of the queue in its priority level.
*/
void Schedule(u32 priority, u32 core, Thread* thread);
/**
* Add a thread to the scheduling queue of a cpu core. The thread is added at the
* front of the queue in its priority level.
*/
void SchedulePrepend(u32 priority, u32 core, Thread* thread);
/// Reschedule an already scheduled thread based on a new priority
void Reschedule(u32 priority, u32 core, Thread* thread);
/// Unschedules a thread.
void Unschedule(u32 priority, u32 core, Thread* thread);
/**
* Transfers a thread to a specific core. If destination_core is -1, the thread is
* unscheduled from its source core and added to its suggested queue instead.
*/
void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
/// Selects a core and forces it to unload its current thread's context
void UnloadThread(s32 core);
/**
* Takes care of selecting the new scheduled thread in three steps:
*
* 1. First a thread is selected from the top of the priority queue. If no thread
* is obtained then we move to step two, else we are done.
*
* 2. Second we try to get a suggested thread that's not assigned to any core or
* that is not the top thread in that core.
*
* 3. Third, if no suggested thread is found, we do a second pass and pick a running
* thread on another core and swap it with its current thread.
*/
void SelectThread(u32 core);
bool HaveReadyThreads(u32 core_id) const {
return !scheduled_queue[core_id].empty();
}
/**
* Takes a thread and moves it to the back of its priority list.
*
* @note This operation can be redundant; in that case no scheduling is changed and the
* yield is marked as redundant.
*/
bool YieldThread(Thread* thread);
/**
* Takes a thread and moves it to the back of its priority list.
* Afterwards, tries to pick a suggested thread from the suggested queue that has a worse
* running time or a better priority than the next thread on the core.
*
* @note This operation can be redundant; in that case no scheduling is changed and the
* yield is marked as redundant.
*/
bool YieldThreadAndBalanceLoad(Thread* thread);
/**
* Takes a thread and moves it out of the scheduling queue and into the suggested queue.
* If no thread can be scheduled on that core afterwards, a suggested thread is obtained
* instead.
*
* @note This operation can be redundant; in that case no scheduling is changed and the
* yield is marked as redundant.
*/
bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
/**
* Rotates the scheduling queues of threads at a preemption priority and then does
* some core rebalancing. Preemption priorities can be found in the array
* 'preemption_priorities'.
*
* @note This operation happens every 10ms.
*/
void PreemptThreads();
u32 CpuCoresCount() const {
return NUM_CPU_CORES;
}
void SetReselectionPending() {
is_reselection_pending.store(true, std::memory_order_release);
}
bool IsReselectionPending() const {
return is_reselection_pending.load(std::memory_order_acquire);
}
void Shutdown();
private:
bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
static constexpr u32 min_regular_priority = 2;
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
std::atomic<bool> is_reselection_pending{false};
// The priority levels at which the global scheduler preempts threads every 10 ms. They are
// ordered from Core 0 to Core 3.
std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
/// Lists all thread ids that aren't deleted/etc.
std::vector<SharedPtr<Thread>> thread_list;
Core::System& system;
};
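// A minimal sketch of how a runnable thread ends up distributed across these queues
// according to its affinity mask; this mirrors the Thread scheduling helpers further
// below and is illustrative only (DistributeRunnableThread is a hypothetical helper,
// not part of the codebase):
//
//     void DistributeRunnableThread(GlobalScheduler& gs, Thread* t) {
//         const u32 prio = t->GetPriority();
//         const s32 core = t->GetProcessorID();
//         for (u32 i = 0; i < GlobalScheduler::NUM_CPU_CORES; ++i) {
//             if (((t->GetAffinityMask() >> i) & 1) == 0) {
//                 continue; // core disallowed by the affinity mask
//             }
//             if (static_cast<s32>(i) == core) {
//                 gs.Schedule(prio, i, t);  // the core the thread runs on
//             } else {
//                 gs.Suggest(prio, i, t);   // other allowed cores may pick it up when idle
//             }
//         }
//     }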
class Scheduler final {
public:
explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id);
~Scheduler();
/// Returns whether there are any threads that are ready to run.
bool HaveReadyThreads() const;
/// Reschedules to the next available thread (call after current thread is suspended)
void TryDoContextSwitch();
/// Unloads the currently running thread
void UnloadThread();
/// Selects the thread at the top of the scheduling multilevel queue.
void SelectThreads();
/// Gets the current running thread
Thread* GetCurrentThread() const;
/// Gets the currently selected thread from the top of the multilevel queue
Thread* GetSelectedThread() const;
/// Gets the timestamp for the last context switch in ticks.
u64 GetLastContextSwitchTicks() const;
bool ContextSwitchPending() const {
return is_context_switch_pending;
}
/// Shuts down the scheduler.
void Shutdown();
private:
friend class GlobalScheduler;
/// Switches the CPU's active thread context to that of the selected thread
void SwitchContext();
/**
* Called on every context switch to update the internal timestamp
@@ -152,19 +213,16 @@ private:
*/
void UpdateLastContextSwitchTime(Thread* thread, Process* process);
/// Lists all thread ids that aren't deleted/etc.
std::vector<SharedPtr<Thread>> thread_list;
/// Lists only ready thread ids.
Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
SharedPtr<Thread> current_thread = nullptr;
Core::ARM_Interface& cpu_core;
u64 last_context_switch_time = 0;
SharedPtr<Thread> selected_thread = nullptr;
Core::System& system;
static std::mutex scheduler_mutex;
Core::ARM_Interface& cpu_core;
u64 last_context_switch_time = 0;
u64 idle_selection_count = 0;
const u32 core_id;
bool is_context_switch_pending = false;
};
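// Rough sketch of how a core picks up a reschedule with the two-level design
// (illustrative only; RescheduleCore is a hypothetical wrapper, the real call sites
// live in the CPU core loop):
//
//     void RescheduleCore(Core::System& system, u32 core_id) {
//         system.GlobalScheduler().SelectThread(core_id); // refresh selected_thread
//         system.Scheduler(core_id).TryDoContextSwitch(); // no-op unless a switch is pending
//     }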
} // namespace Kernel

View File

@@ -516,7 +516,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
thread->WakeAfterDelay(nano_seconds);
thread->SetWakeupCallback(DefaultThreadWakeupCallback);
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
system.PrepareReschedule(thread->GetProcessorID());
return RESULT_TIMEOUT;
}
@@ -534,6 +534,7 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand
}
thread->CancelWait();
system.PrepareReschedule(thread->GetProcessorID());
return RESULT_SUCCESS;
}
@@ -577,7 +578,8 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
}
auto* const current_process = system.Kernel().CurrentProcess();
return current_process->GetMutex().Release(mutex_addr);
return current_process->GetMutex().Release(mutex_addr,
system.CurrentScheduler().GetCurrentThread());
}
enum class BreakType : u32 {
@@ -1066,6 +1068,8 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
}
thread->SetActivity(static_cast<ThreadActivity>(activity));
system.PrepareReschedule(thread->GetProcessorID());
return RESULT_SUCCESS;
}
@@ -1147,7 +1151,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
thread->SetPriority(priority);
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
system.PrepareReschedule(thread->GetProcessorID());
return RESULT_SUCCESS;
}
@@ -1503,7 +1507,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
thread->SetName(
fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
system.PrepareReschedule(thread->GetProcessorID());
return RESULT_SUCCESS;
}
@@ -1525,7 +1529,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
thread->ResumeFromWait();
if (thread->GetStatus() == ThreadStatus::Ready) {
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
system.PrepareReschedule(thread->GetProcessorID());
}
return RESULT_SUCCESS;
@@ -1537,7 +1541,7 @@ static void ExitThread(Core::System& system) {
auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
current_thread->Stop();
system.CurrentScheduler().RemoveThread(current_thread);
system.GlobalScheduler().RemoveThread(current_thread);
system.PrepareReschedule();
}
@@ -1553,17 +1557,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
auto& scheduler = system.CurrentScheduler();
auto* const current_thread = scheduler.GetCurrentThread();
bool is_redundant = false;
if (nanoseconds <= 0) {
switch (static_cast<SleepType>(nanoseconds)) {
case SleepType::YieldWithoutLoadBalancing:
scheduler.YieldWithoutLoadBalancing(current_thread);
is_redundant = current_thread->YieldSimple();
break;
case SleepType::YieldWithLoadBalancing:
scheduler.YieldWithLoadBalancing(current_thread);
is_redundant = current_thread->YieldAndBalanceLoad();
break;
case SleepType::YieldAndWaitForLoadBalancing:
scheduler.YieldAndWaitForLoadBalancing(current_thread);
is_redundant = current_thread->YieldAndWaitForLoadBalancing();
break;
default:
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
@@ -1572,10 +1577,13 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
current_thread->Sleep(nanoseconds);
}
// Reschedule all CPU cores
for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) {
system.CpuCore(i).PrepareReschedule();
if (is_redundant) {
// If the yield was redundant, the core is pretty much idle. Some games keep a core
// idling while doing nothing, so we advance timing to avoid costly continuous calls.
system.CoreTiming().AddTicks(2000);
}
system.PrepareReschedule(current_thread->GetProcessorID());
}
/// Wait process wide key atomic
@@ -1601,17 +1609,21 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
return ERR_INVALID_ADDRESS;
}
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
auto* const current_process = system.Kernel().CurrentProcess();
const auto& handle_table = current_process->GetHandleTable();
SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
ASSERT(thread);
const auto release_result = current_process->GetMutex().Release(mutex_addr);
SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread();
const auto release_result =
current_process->GetMutex().Release(mutex_addr, current_thread.get());
if (release_result.IsError()) {
return release_result;
}
SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread();
current_thread->SetCondVarWaitAddress(condition_variable_addr);
current_thread->SetMutexWaitAddress(mutex_addr);
current_thread->SetWaitHandle(thread_handle);
@@ -1622,7 +1634,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
// Note: Deliberately don't attempt to inherit the lock owner's priority.
system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
system.PrepareReschedule(current_thread->GetProcessorID());
return RESULT_SUCCESS;
}
@@ -1632,24 +1644,19 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
condition_variable_addr, target);
const auto RetrieveWaitingThreads = [&system](std::size_t core_index,
std::vector<SharedPtr<Thread>>& waiting_threads,
VAddr condvar_addr) {
const auto& scheduler = system.Scheduler(core_index);
const auto& thread_list = scheduler.GetThreadList();
for (const auto& thread : thread_list) {
if (thread->GetCondVarWaitAddress() == condvar_addr)
waiting_threads.push_back(thread);
}
};
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
// Retrieve a list of all threads that are waiting for this condition variable.
std::vector<SharedPtr<Thread>> waiting_threads;
RetrieveWaitingThreads(0, waiting_threads, condition_variable_addr);
RetrieveWaitingThreads(1, waiting_threads, condition_variable_addr);
RetrieveWaitingThreads(2, waiting_threads, condition_variable_addr);
RetrieveWaitingThreads(3, waiting_threads, condition_variable_addr);
const auto& scheduler = system.GlobalScheduler();
const auto& thread_list = scheduler.GetThreadList();
for (const auto& thread : thread_list) {
if (thread->GetCondVarWaitAddress() == condition_variable_addr) {
waiting_threads.push_back(thread);
}
}
// Sort them by priority, such that the highest priority ones come first.
std::sort(waiting_threads.begin(), waiting_threads.end(),
[](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
@@ -1679,18 +1686,20 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
// Atomically read the value of the mutex.
u32 mutex_val = 0;
u32 update_val = 0;
const VAddr mutex_address = thread->GetMutexWaitAddress();
do {
monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
monitor.SetExclusive(current_core, mutex_address);
// If the mutex is not yet acquired, acquire it.
mutex_val = Memory::Read32(thread->GetMutexWaitAddress());
mutex_val = Memory::Read32(mutex_address);
if (mutex_val != 0) {
monitor.ClearExclusive();
break;
update_val = mutex_val | Mutex::MutexHasWaitersFlag;
} else {
update_val = thread->GetWaitHandle();
}
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
thread->GetWaitHandle()));
} while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
if (mutex_val == 0) {
// We were able to acquire the mutex, resume this thread.
ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
@@ -1704,20 +1713,9 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
thread->SetLockOwner(nullptr);
thread->SetMutexWaitAddress(0);
thread->SetWaitHandle(0);
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
system.PrepareReschedule(thread->GetProcessorID());
} else {
// Atomically signal that the mutex now has a waiting thread.
do {
monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
// Ensure that the mutex value is still what we expect.
u32 value = Memory::Read32(thread->GetMutexWaitAddress());
// TODO(Subv): When this happens, the kernel just clears the exclusive state and
// retries the initial read for this thread.
ASSERT_MSG(mutex_val == value, "Unhandled synchronization primitive case");
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
mutex_val | Mutex::MutexHasWaitersFlag));
// The mutex is already owned by some other thread, make this thread wait on it.
const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
@@ -1728,6 +1726,7 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
thread->SetStatus(ThreadStatus::WaitMutex);
owner->AddMutexWaiter(thread);
system.PrepareReschedule(thread->GetProcessorID());
}
}
@@ -1754,7 +1753,12 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
return address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
const ResultCode result =
address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
if (result == RESULT_SUCCESS) {
system.PrepareReschedule();
}
return result;
}
// Signals to an address (via Address Arbiter)
@@ -2040,7 +2044,10 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
return ERR_INVALID_HANDLE;
}
system.PrepareReschedule(thread->GetProcessorID());
thread->ChangeCore(core, affinity_mask);
system.PrepareReschedule(thread->GetProcessorID());
return RESULT_SUCCESS;
}
@@ -2095,7 +2102,7 @@ static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle
auto& kernel = system.Kernel();
const auto [readable_event, writable_event] =
WritableEvent::CreateEventPair(kernel, ResetType::Manual, "CreateEvent");
WritableEvent::CreateEventPair(kernel, "CreateEvent");
HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();
@@ -2151,6 +2158,7 @@ static ResultCode SignalEvent(Core::System& system, Handle handle) {
}
writable_event->Signal();
system.PrepareReschedule();
return RESULT_SUCCESS;
}

View File

@@ -45,15 +45,7 @@ void Thread::Stop() {
callback_handle);
kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle);
callback_handle = 0;
// Clean up thread from ready queue
// This is only needed when the thread is terminated forcefully (SVC TerminateProcess)
if (status == ThreadStatus::Ready || status == ThreadStatus::Paused) {
scheduler->UnscheduleThread(this, current_priority);
}
status = ThreadStatus::Dead;
SetStatus(ThreadStatus::Dead);
WakeupAllWaitingThreads();
// Clean up any dangling references in objects that this thread was waiting for
@@ -132,17 +124,16 @@ void Thread::ResumeFromWait() {
wakeup_callback = nullptr;
if (activity == ThreadActivity::Paused) {
status = ThreadStatus::Paused;
SetStatus(ThreadStatus::Paused);
return;
}
status = ThreadStatus::Ready;
ChangeScheduler();
SetStatus(ThreadStatus::Ready);
}
void Thread::CancelWait() {
ASSERT(GetStatus() == ThreadStatus::WaitSynch);
ClearWaitObjects();
SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED);
ResumeFromWait();
}
@@ -205,9 +196,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->name = std::move(name);
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
thread->owner_process = &owner_process;
auto& scheduler = kernel.GlobalScheduler();
scheduler.AddThread(thread);
thread->tls_address = thread->owner_process->CreateTLSRegion();
thread->scheduler = &system.Scheduler(processor_id);
thread->scheduler->AddThread(thread);
thread->owner_process->RegisterThread(thread.get());
@@ -250,6 +241,22 @@ void Thread::SetStatus(ThreadStatus new_status) {
return;
}
switch (new_status) {
case ThreadStatus::Ready:
case ThreadStatus::Running:
SetSchedulingStatus(ThreadSchedStatus::Runnable);
break;
case ThreadStatus::Dormant:
SetSchedulingStatus(ThreadSchedStatus::None);
break;
case ThreadStatus::Dead:
SetSchedulingStatus(ThreadSchedStatus::Exited);
break;
default:
SetSchedulingStatus(ThreadSchedStatus::Paused);
break;
}
if (status == ThreadStatus::Running) {
last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
}
@@ -311,8 +318,7 @@ void Thread::UpdatePriority() {
return;
}
scheduler->SetThreadPriority(this, new_priority);
current_priority = new_priority;
SetCurrentPriority(new_priority);
if (!lock_owner) {
return;
@@ -328,47 +334,7 @@ void Thread::UpdatePriority() {
}
void Thread::ChangeCore(u32 core, u64 mask) {
ideal_core = core;
affinity_mask = mask;
ChangeScheduler();
}
void Thread::ChangeScheduler() {
if (status != ThreadStatus::Ready) {
return;
}
auto& system = Core::System::GetInstance();
std::optional<s32> new_processor_id{GetNextProcessorId(affinity_mask)};
if (!new_processor_id) {
new_processor_id = processor_id;
}
if (ideal_core != -1 && system.Scheduler(ideal_core).GetCurrentThread() == nullptr) {
new_processor_id = ideal_core;
}
ASSERT(*new_processor_id < 4);
// Add thread to new core's scheduler
auto& next_scheduler = system.Scheduler(*new_processor_id);
if (*new_processor_id != processor_id) {
// Remove thread from previous core's scheduler
scheduler->RemoveThread(this);
next_scheduler.AddThread(this);
}
processor_id = *new_processor_id;
// If the thread was ready, unschedule from the previous core and schedule on the new core
scheduler->UnscheduleThread(this, current_priority);
next_scheduler.ScheduleThread(this, current_priority);
// Change thread's scheduler
scheduler = &next_scheduler;
system.CpuCore(processor_id).PrepareReschedule();
SetCoreAndAffinityMask(core, mask);
}
bool Thread::AllWaitObjectsReady() const {
@@ -388,10 +354,8 @@ void Thread::SetActivity(ThreadActivity value) {
if (value == ThreadActivity::Paused) {
// Set status if not waiting
if (status == ThreadStatus::Ready) {
status = ThreadStatus::Paused;
} else if (status == ThreadStatus::Running) {
status = ThreadStatus::Paused;
if (status == ThreadStatus::Ready || status == ThreadStatus::Running) {
SetStatus(ThreadStatus::Paused);
Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
}
} else if (status == ThreadStatus::Paused) {
@@ -408,6 +372,170 @@ void Thread::Sleep(s64 nanoseconds) {
WakeAfterDelay(nanoseconds);
}
bool Thread::YieldSimple() {
auto& scheduler = kernel.GlobalScheduler();
return scheduler.YieldThread(this);
}
bool Thread::YieldAndBalanceLoad() {
auto& scheduler = kernel.GlobalScheduler();
return scheduler.YieldThreadAndBalanceLoad(this);
}
bool Thread::YieldAndWaitForLoadBalancing() {
auto& scheduler = kernel.GlobalScheduler();
return scheduler.YieldThreadAndWaitForLoadBalancing(this);
}
void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
const u32 old_flags = scheduling_state;
scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
static_cast<u32>(new_status);
AdjustSchedulingOnStatus(old_flags);
}
void Thread::SetCurrentPriority(u32 new_priority) {
const u32 old_priority = std::exchange(current_priority, new_priority);
AdjustSchedulingOnPriority(old_priority);
}
ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
const auto HighestSetCore = [](u64 mask, u32 max_cores) {
for (s32 core = max_cores - 1; core >= 0; core--) {
if (((mask >> core) & 1) != 0) {
return core;
}
}
return -1;
};
const bool use_override = affinity_override_count != 0;
if (new_core == THREADPROCESSORID_DONT_UPDATE) {
new_core = use_override ? ideal_core_override : ideal_core;
if ((new_affinity_mask & (1ULL << new_core)) == 0) {
return ERR_INVALID_COMBINATION;
}
}
if (use_override) {
ideal_core_override = new_core;
affinity_mask_override = new_affinity_mask;
} else {
const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
ideal_core = new_core;
if (old_affinity_mask != new_affinity_mask) {
const s32 old_core = processor_id;
if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
if (ideal_core < 0) {
processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
} else {
processor_id = ideal_core;
}
}
AdjustSchedulingOnAffinity(old_affinity_mask, old_core);
}
}
return RESULT_SUCCESS;
}
void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
if (old_flags == scheduling_state) {
return;
}
auto& scheduler = kernel.GlobalScheduler();
if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
ThreadSchedStatus::Runnable) {
// In this case the thread was running, now it's pausing/exiting
if (processor_id >= 0) {
scheduler.Unschedule(current_priority, processor_id, this);
}
for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
scheduler.Unsuggest(current_priority, static_cast<u32>(core), this);
}
}
} else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
// The thread is now set to running from being stopped
if (processor_id >= 0) {
scheduler.Schedule(current_priority, processor_id, this);
}
for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
scheduler.Suggest(current_priority, static_cast<u32>(core), this);
}
}
}
scheduler.SetReselectionPending();
}
void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
return;
}
auto& scheduler = Core::System::GetInstance().GlobalScheduler();
if (processor_id >= 0) {
scheduler.Unschedule(old_priority, processor_id, this);
}
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
scheduler.Unsuggest(old_priority, core, this);
}
}
// Add thread to the new priority queues.
Thread* current_thread = GetCurrentThread();
if (processor_id >= 0) {
if (current_thread == this) {
scheduler.SchedulePrepend(current_priority, processor_id, this);
} else {
scheduler.Schedule(current_priority, processor_id, this);
}
}
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
scheduler.Suggest(current_priority, core, this);
}
}
scheduler.SetReselectionPending();
}
void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
auto& scheduler = Core::System::GetInstance().GlobalScheduler();
if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
current_priority >= THREADPRIO_COUNT) {
return;
}
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (((old_affinity_mask >> core) & 1) != 0) {
if (core == old_core) {
scheduler.Unschedule(current_priority, core, this);
} else {
scheduler.Unsuggest(current_priority, core, this);
}
}
}
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
if (((affinity_mask >> core) & 1) != 0) {
if (core == processor_id) {
scheduler.Schedule(current_priority, core, this);
} else {
scheduler.Suggest(current_priority, core, this);
}
}
}
scheduler.SetReselectionPending();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**

View File

@@ -75,6 +75,26 @@ enum class ThreadActivity : u32 {
Paused = 1,
};
enum class ThreadSchedStatus : u32 {
None = 0,
Paused = 1,
Runnable = 2,
Exited = 3,
};
enum class ThreadSchedFlags : u32 {
ProcessPauseFlag = 1 << 4,
ThreadPauseFlag = 1 << 5,
ProcessDebugPauseFlag = 1 << 6,
KernelInitPauseFlag = 1 << 8,
};
enum class ThreadSchedMasks : u32 {
LowMask = 0x000f,
HighMask = 0xfff0,
ForcePauseMask = 0x0070,
};
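// Illustrative note on how these values combine in 'scheduling_state' (see
// SetSchedulingStatus()/GetSchedulingStatus() below): the low nibble holds the
// ThreadSchedStatus and the high bits hold pause flags, e.g.
//
//     u32 state = static_cast<u32>(ThreadSchedStatus::Runnable) |
//                 static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag);
//     auto status = static_cast<ThreadSchedStatus>(
//         state & static_cast<u32>(ThreadSchedMasks::LowMask));                   // -> Runnable
//     bool force_paused =
//         (state & static_cast<u32>(ThreadSchedMasks::ForcePauseMask)) != 0;      // -> true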
class Thread final : public WaitObject {
public:
using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
@@ -278,6 +298,10 @@ public:
return processor_id;
}
void SetProcessorID(s32 new_core) {
processor_id = new_core;
}
Process* GetOwnerProcess() {
return owner_process;
}
@@ -295,6 +319,9 @@ public:
}
void ClearWaitObjects() {
for (const auto& waiting_object : wait_objects) {
waiting_object->RemoveWaitingThread(this);
}
wait_objects.clear();
}
@@ -383,11 +410,47 @@ public:
/// Sleeps this thread for the given amount of nanoseconds.
void Sleep(s64 nanoseconds);
/// Yields this thread without rebalancing loads.
bool YieldSimple();
/// Yields this thread and does a load rebalancing.
bool YieldAndBalanceLoad();
/// Yields this thread; if the core is left idle, loads are rebalanced.
bool YieldAndWaitForLoadBalancing();
void IncrementYieldCount() {
yield_count++;
}
u64 GetYieldCount() const {
return yield_count;
}
ThreadSchedStatus GetSchedulingStatus() const {
return static_cast<ThreadSchedStatus>(scheduling_state &
static_cast<u32>(ThreadSchedMasks::LowMask));
}
bool IsRunning() const {
return is_running;
}
void SetIsRunning(bool value) {
is_running = value;
}
private:
explicit Thread(KernelCore& kernel);
~Thread() override;
void ChangeScheduler();
void SetSchedulingStatus(ThreadSchedStatus new_status);
void SetCurrentPriority(u32 new_priority);
ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
void AdjustSchedulingOnStatus(u32 old_flags);
void AdjustSchedulingOnPriority(u32 old_priority);
void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core);
Core::ARM_Interface::ThreadContext context{};
@@ -409,6 +472,8 @@ private:
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
u64 last_running_ticks = 0; ///< CPU tick when thread was last running
u64 yield_count = 0; ///< Number of redundant yields carried out by this thread.
///< A redundant yield is one where no scheduling change occurs.
s32 processor_id = 0;
@@ -453,6 +518,13 @@ private:
ThreadActivity activity = ThreadActivity::Normal;
s32 ideal_core_override = -1;
u64 affinity_mask_override = 0x1;
u32 affinity_override_count = 0;
u32 scheduling_state = 0;
bool is_running = false;
std::string name;
};

View File

@@ -6,6 +6,9 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_cpu.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
@@ -82,9 +85,6 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
const std::size_t index = thread->GetWaitObjectIndex(this);
for (const auto& object : thread->GetWaitObjects()) {
object->RemoveWaitingThread(thread.get());
}
thread->ClearWaitObjects();
thread->CancelWakeupTimer();
@@ -95,6 +95,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
}
if (resume) {
thread->ResumeFromWait();
Core::System::GetInstance().PrepareReschedule(thread->GetProcessorID());
}
}

View File

@@ -15,8 +15,7 @@ namespace Kernel {
WritableEvent::WritableEvent(KernelCore& kernel) : Object{kernel} {}
WritableEvent::~WritableEvent() = default;
EventPair WritableEvent::CreateEventPair(KernelCore& kernel, ResetType reset_type,
std::string name) {
EventPair WritableEvent::CreateEventPair(KernelCore& kernel, std::string name) {
SharedPtr<WritableEvent> writable_event(new WritableEvent(kernel));
SharedPtr<ReadableEvent> readable_event(new ReadableEvent(kernel));
@@ -24,7 +23,6 @@ EventPair WritableEvent::CreateEventPair(KernelCore& kernel, ResetType reset_typ
writable_event->readable = readable_event;
readable_event->name = name + ":Readable";
readable_event->signaled = false;
readable_event->reset_type = reset_type;
return {std::move(readable_event), std::move(writable_event)};
}
@@ -33,10 +31,6 @@ SharedPtr<ReadableEvent> WritableEvent::GetReadableEvent() const {
return readable;
}
ResetType WritableEvent::GetResetType() const {
return readable->reset_type;
}
void WritableEvent::Signal() {
readable->Signal();
}

View File

@@ -24,11 +24,9 @@ public:
/**
* Creates an event
* @param kernel The kernel instance to create this event under.
* @param reset_type ResetType describing how to create event
* @param name Optional name of event
*/
static EventPair CreateEventPair(KernelCore& kernel, ResetType reset_type,
std::string name = "Unknown");
static EventPair CreateEventPair(KernelCore& kernel, std::string name = "Unknown");
std::string GetTypeName() const override {
return "WritableEvent";
@@ -44,8 +42,6 @@ public:
SharedPtr<ReadableEvent> GetReadableEvent() const;
ResetType GetResetType() const;
void Signal();
void Clear();
bool IsSignaled() const;

View File

@@ -289,8 +289,8 @@ ISelfController::ISelfController(Core::System& system,
RegisterHandlers(functions);
auto& kernel = system.Kernel();
launchable_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual,
"ISelfController:LaunchableEvent");
launchable_event =
Kernel::WritableEvent::CreateEventPair(kernel, "ISelfController:LaunchableEvent");
// This event is created by AM on the first time GetAccumulatedSuspendedTickChangedEvent() is
// called. Yuzu can just create it unconditionally, since it doesn't need to support multiple
@@ -298,7 +298,7 @@ ISelfController::ISelfController(Core::System& system,
// suspended if the event has previously been created by a call to
// GetAccumulatedSuspendedTickChangedEvent.
accumulated_suspended_tick_changed_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, "ISelfController:AccumulatedSuspendedTickChangedEvent");
kernel, "ISelfController:AccumulatedSuspendedTickChangedEvent");
accumulated_suspended_tick_changed_event.writable->Signal();
}
@@ -523,10 +523,10 @@ void ISelfController::GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequest
}
AppletMessageQueue::AppletMessageQueue(Kernel::KernelCore& kernel) {
on_new_message = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual,
"AMMessageQueue:OnMessageRecieved");
on_operation_mode_changed = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "AMMessageQueue:OperationModeChanged");
on_new_message =
Kernel::WritableEvent::CreateEventPair(kernel, "AMMessageQueue:OnMessageRecieved");
on_operation_mode_changed =
Kernel::WritableEvent::CreateEventPair(kernel, "AMMessageQueue:OperationModeChanged");
}
AppletMessageQueue::~AppletMessageQueue() = default;
@@ -1073,9 +1073,9 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
{71, nullptr, "RequestToReboot"},
{80, nullptr, "ExitAndRequestToShowThanksMessage"},
{90, &IApplicationFunctions::EnableApplicationCrashReport, "EnableApplicationCrashReport"},
{100, nullptr, "InitializeApplicationCopyrightFrameBuffer"},
{101, nullptr, "SetApplicationCopyrightImage"},
{102, nullptr, "SetApplicationCopyrightVisibility"},
{100, &IApplicationFunctions::InitializeApplicationCopyrightFrameBuffer, "InitializeApplicationCopyrightFrameBuffer"},
{101, &IApplicationFunctions::SetApplicationCopyrightImage, "SetApplicationCopyrightImage"},
{102, &IApplicationFunctions::SetApplicationCopyrightVisibility, "SetApplicationCopyrightVisibility"},
{110, nullptr, "QueryApplicationPlayStatistics"},
{120, nullptr, "ExecuteProgram"},
{121, nullptr, "ClearUserChannel"},
@@ -1091,7 +1091,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
auto& kernel = system.Kernel();
gpu_error_detected_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, "IApplicationFunctions:GpuErrorDetectedSystemEvent");
kernel, "IApplicationFunctions:GpuErrorDetectedSystemEvent");
}
IApplicationFunctions::~IApplicationFunctions() = default;
@@ -1103,6 +1103,31 @@ void IApplicationFunctions::EnableApplicationCrashReport(Kernel::HLERequestConte
rb.Push(RESULT_SUCCESS);
}
void IApplicationFunctions::InitializeApplicationCopyrightFrameBuffer(
Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void IApplicationFunctions::SetApplicationCopyrightImage(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void IApplicationFunctions::SetApplicationCopyrightVisibility(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto is_visible = rp.Pop<bool>();
LOG_WARNING(Service_AM, "(STUBBED) called, is_visible={}", is_visible);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void IApplicationFunctions::BeginBlockingHomeButtonShortAndLongPressed(
Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_AM, "(STUBBED) called");
@@ -1140,8 +1165,9 @@ void IApplicationFunctions::PopLaunchParameter(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called, kind={:08X}", static_cast<u8>(kind));
if (kind == LaunchParameterKind::ApplicationSpecific && !launch_popped_application_specific) {
const auto backend = BCAT::CreateBackendFromSettings(
[this](u64 tid) { return system.GetFileSystemController().GetBCATDirectory(tid); });
const auto backend = BCAT::CreateBackendFromSettings(system, [this](u64 tid) {
return system.GetFileSystemController().GetBCATDirectory(tid);
});
const auto build_id_full = system.GetCurrentProcessBuildID();
u64 build_id{};
std::memcpy(&build_id, build_id_full.data(), sizeof(u64));

View File

@@ -252,6 +252,9 @@ private:
void BeginBlockingHomeButton(Kernel::HLERequestContext& ctx);
void EndBlockingHomeButton(Kernel::HLERequestContext& ctx);
void EnableApplicationCrashReport(Kernel::HLERequestContext& ctx);
void InitializeApplicationCopyrightFrameBuffer(Kernel::HLERequestContext& ctx);
void SetApplicationCopyrightImage(Kernel::HLERequestContext& ctx);
void SetApplicationCopyrightVisibility(Kernel::HLERequestContext& ctx);
void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx);
bool launch_popped_application_specific = false;

View File

@@ -24,12 +24,12 @@
namespace Service::AM::Applets {
AppletDataBroker::AppletDataBroker(Kernel::KernelCore& kernel) {
state_changed_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, "ILibraryAppletAccessor:StateChangedEvent");
pop_out_data_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, "ILibraryAppletAccessor:PopDataOutEvent");
state_changed_event =
Kernel::WritableEvent::CreateEventPair(kernel, "ILibraryAppletAccessor:StateChangedEvent");
pop_out_data_event =
Kernel::WritableEvent::CreateEventPair(kernel, "ILibraryAppletAccessor:PopDataOutEvent");
pop_interactive_out_data_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, "ILibraryAppletAccessor:PopInteractiveDataOutEvent");
kernel, "ILibraryAppletAccessor:PopInteractiveDataOutEvent");
}
AppletDataBroker::~AppletDataBroker() = default;

View File

@@ -67,8 +67,8 @@ AOC_U::AOC_U(Core::System& system)
RegisterHandlers(functions);
auto& kernel = system.Kernel();
aoc_change_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual,
"GetAddOnContentListChanged:Event");
aoc_change_event =
Kernel::WritableEvent::CreateEventPair(kernel, "GetAddOnContentListChanged:Event");
}
AOC_U::~AOC_U() = default;

View File

@@ -2,6 +2,10 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <array>
#include <utility>
#include "common/logging/log.h"
#include "core/core_timing.h"
#include "core/hle/service/apm/controller.h"
@@ -9,8 +13,7 @@
namespace Service::APM {
constexpr PerformanceConfiguration DEFAULT_PERFORMANCE_CONFIGURATION =
PerformanceConfiguration::Config7;
constexpr auto DEFAULT_PERFORMANCE_CONFIGURATION = PerformanceConfiguration::Config7;
Controller::Controller(Core::Timing::CoreTiming& core_timing)
: core_timing{core_timing}, configs{
@@ -22,18 +25,35 @@ Controller::~Controller() = default;
void Controller::SetPerformanceConfiguration(PerformanceMode mode,
PerformanceConfiguration config) {
static const std::map<PerformanceConfiguration, u32> PCONFIG_TO_SPEED_MAP{
{PerformanceConfiguration::Config1, 1020}, {PerformanceConfiguration::Config2, 1020},
{PerformanceConfiguration::Config3, 1224}, {PerformanceConfiguration::Config4, 1020},
{PerformanceConfiguration::Config5, 1020}, {PerformanceConfiguration::Config6, 1224},
{PerformanceConfiguration::Config7, 1020}, {PerformanceConfiguration::Config8, 1020},
{PerformanceConfiguration::Config9, 1020}, {PerformanceConfiguration::Config10, 1020},
{PerformanceConfiguration::Config11, 1020}, {PerformanceConfiguration::Config12, 1020},
{PerformanceConfiguration::Config13, 1785}, {PerformanceConfiguration::Config14, 1785},
{PerformanceConfiguration::Config15, 1020}, {PerformanceConfiguration::Config16, 1020},
};
static constexpr std::array<std::pair<PerformanceConfiguration, u32>, 16> config_to_speed{{
{PerformanceConfiguration::Config1, 1020},
{PerformanceConfiguration::Config2, 1020},
{PerformanceConfiguration::Config3, 1224},
{PerformanceConfiguration::Config4, 1020},
{PerformanceConfiguration::Config5, 1020},
{PerformanceConfiguration::Config6, 1224},
{PerformanceConfiguration::Config7, 1020},
{PerformanceConfiguration::Config8, 1020},
{PerformanceConfiguration::Config9, 1020},
{PerformanceConfiguration::Config10, 1020},
{PerformanceConfiguration::Config11, 1020},
{PerformanceConfiguration::Config12, 1020},
{PerformanceConfiguration::Config13, 1785},
{PerformanceConfiguration::Config14, 1785},
{PerformanceConfiguration::Config15, 1020},
{PerformanceConfiguration::Config16, 1020},
}};
SetClockSpeed(PCONFIG_TO_SPEED_MAP.find(config)->second);
const auto iter = std::find_if(config_to_speed.cbegin(), config_to_speed.cend(),
[config](const auto& entry) { return entry.first == config; });
if (iter == config_to_speed.cend()) {
LOG_ERROR(Service_APM, "Invalid performance configuration value provided: {}",
static_cast<u32>(config));
return;
}
SetClockSpeed(iter->second);
configs.insert_or_assign(mode, config);
}
@@ -48,7 +68,7 @@ void Controller::SetFromCpuBoostMode(CpuBoostMode mode) {
BOOST_MODE_TO_CONFIG_MAP.at(static_cast<u32>(mode)));
}
PerformanceMode Controller::GetCurrentPerformanceMode() {
PerformanceMode Controller::GetCurrentPerformanceMode() const {
return Settings::values.use_docked_mode ? PerformanceMode::Docked : PerformanceMode::Handheld;
}
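The apm/controller.cpp hunk above replaces a function-local std::map lookup with a constexpr std::array of pairs searched via std::find_if, plus an error path for unknown configurations. A minimal self-contained sketch of that lookup pattern, with a hypothetical enum and placeholder clock speeds:

#include <algorithm>
#include <array>
#include <cstdint>
#include <utility>

enum class Config : std::uint32_t { A, B, C };

// constexpr table of (configuration, clock speed) pairs.
constexpr std::array<std::pair<Config, std::uint32_t>, 3> config_to_speed{{
    {Config::A, 1020},
    {Config::B, 1224},
    {Config::C, 1785},
}};

std::uint32_t LookUpSpeed(Config config) {
    const auto iter = std::find_if(config_to_speed.cbegin(), config_to_speed.cend(),
                                   [config](const auto& entry) { return entry.first == config; });
    // Unknown values fall back to a default instead of dereferencing end().
    return iter != config_to_speed.cend() ? iter->second : 1020;
}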

View File

@@ -56,7 +56,7 @@ public:
void SetPerformanceConfiguration(PerformanceMode mode, PerformanceConfiguration config);
void SetFromCpuBoostMode(CpuBoostMode mode);
PerformanceMode GetCurrentPerformanceMode();
PerformanceMode GetCurrentPerformanceMode() const;
PerformanceConfiguration GetCurrentPerformanceConfiguration(PerformanceMode mode);
private:

View File

@@ -65,8 +65,8 @@ public:
RegisterHandlers(functions);
// This is the event handle used to check if the audio buffer was released
buffer_event = Kernel::WritableEvent::CreateEventPair(
system.Kernel(), Kernel::ResetType::Manual, "IAudioOutBufferReleased");
buffer_event =
Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioOutBufferReleased");
stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate,
audio_params.channel_count, std::move(unique_name),

View File

@@ -47,8 +47,8 @@ public:
// clang-format on
RegisterHandlers(functions);
system_event = Kernel::WritableEvent::CreateEventPair(
system.Kernel(), Kernel::ResetType::Manual, "IAudioRenderer:SystemEvent");
system_event =
Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioRenderer:SystemEvent");
renderer = std::make_unique<AudioCore::AudioRenderer>(
system.CoreTiming(), audren_params, system_event.writable, instance_number);
}
@@ -180,17 +180,17 @@ public:
RegisterHandlers(functions);
auto& kernel = system.Kernel();
buffer_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic,
"IAudioOutBufferReleasedEvent");
buffer_event =
Kernel::WritableEvent::CreateEventPair(kernel, "IAudioOutBufferReleasedEvent");
// Should be similar to audio_output_device_switch_event
audio_input_device_switch_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "IAudioDevice:AudioInputDeviceSwitchedEvent");
kernel, "IAudioDevice:AudioInputDeviceSwitchedEvent");
// Should only be signalled when an audio output device has been changed, example: speaker
// to headset
audio_output_device_switch_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "IAudioDevice:AudioOutputDeviceSwitchedEvent");
kernel, "IAudioDevice:AudioOutputDeviceSwitchedEvent");
}
private:

View File

@@ -10,11 +10,10 @@
namespace Service::BCAT {
ProgressServiceBackend::ProgressServiceBackend(std::string_view event_name) {
auto& kernel{Core::System::GetInstance().Kernel()};
ProgressServiceBackend::ProgressServiceBackend(Kernel::KernelCore& kernel,
std::string_view event_name) {
event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic,
std::string("ProgressServiceBackend:UpdateEvent:").append(event_name));
kernel, std::string("ProgressServiceBackend:UpdateEvent:").append(event_name));
}
Kernel::SharedPtr<Kernel::ReadableEvent> ProgressServiceBackend::GetEvent() const {

View File

@@ -15,6 +15,14 @@
#include "core/hle/kernel/writable_event.h"
#include "core/hle/result.h"
namespace Core {
class System;
}
namespace Kernel {
class KernelCore;
}
namespace Service::BCAT {
struct DeliveryCacheProgressImpl;
@@ -88,7 +96,7 @@ public:
void FinishDownload(ResultCode result);
private:
explicit ProgressServiceBackend(std::string_view event_name);
explicit ProgressServiceBackend(Kernel::KernelCore& kernel, std::string_view event_name);
Kernel::SharedPtr<Kernel::ReadableEvent> GetEvent() const;
DeliveryCacheProgressImpl& GetImpl();
@@ -145,6 +153,6 @@ public:
std::optional<std::vector<u8>> GetLaunchParameter(TitleIDVersion title) override;
};
std::unique_ptr<Backend> CreateBackendFromSettings(DirectoryGetter getter);
std::unique_ptr<Backend> CreateBackendFromSettings(Core::System& system, DirectoryGetter getter);
} // namespace Service::BCAT

View File

@@ -104,14 +104,15 @@ std::string GetZIPFilePath(u64 title_id) {
// If the error is something the user should know about (build ID mismatch, bad client version),
// display an error.
void HandleDownloadDisplayResult(DownloadResult res) {
void HandleDownloadDisplayResult(const AM::Applets::AppletManager& applet_manager,
DownloadResult res) {
if (res == DownloadResult::Success || res == DownloadResult::NoResponse ||
res == DownloadResult::GeneralWebError || res == DownloadResult::GeneralFSError ||
res == DownloadResult::NoMatchTitleId || res == DownloadResult::InvalidContentType) {
return;
}
const auto& frontend{Core::System::GetInstance().GetAppletManager().GetAppletFrontendSet()};
const auto& frontend{applet_manager.GetAppletFrontendSet()};
frontend.error->ShowCustomErrorText(
ResultCode(-1), "There was an error while attempting to use Boxcat.",
DOWNLOAD_RESULT_LOG_MESSAGES[static_cast<std::size_t>(res)], [] {});
@@ -264,12 +265,13 @@ private:
u64 build_id;
};
Boxcat::Boxcat(DirectoryGetter getter) : Backend(std::move(getter)) {}
Boxcat::Boxcat(AM::Applets::AppletManager& applet_manager_, DirectoryGetter getter)
: Backend(std::move(getter)), applet_manager{applet_manager_} {}
Boxcat::~Boxcat() = default;
void SynchronizeInternal(DirectoryGetter dir_getter, TitleIDVersion title,
ProgressServiceBackend& progress,
void SynchronizeInternal(AM::Applets::AppletManager& applet_manager, DirectoryGetter dir_getter,
TitleIDVersion title, ProgressServiceBackend& progress,
std::optional<std::string> dir_name = {}) {
progress.SetNeedHLELock(true);
@@ -295,7 +297,7 @@ void SynchronizeInternal(DirectoryGetter dir_getter, TitleIDVersion title,
FileUtil::Delete(zip_path);
}
HandleDownloadDisplayResult(res);
HandleDownloadDisplayResult(applet_manager, res);
progress.FinishDownload(ERROR_GENERAL_BCAT_FAILURE);
return;
}
@@ -364,17 +366,24 @@ void SynchronizeInternal(DirectoryGetter dir_getter, TitleIDVersion title,
bool Boxcat::Synchronize(TitleIDVersion title, ProgressServiceBackend& progress) {
is_syncing.exchange(true);
std::thread([this, title, &progress] { SynchronizeInternal(dir_getter, title, progress); })
std::thread([this, title, &progress] {
SynchronizeInternal(applet_manager, dir_getter, title, progress);
})
.detach();
return true;
}
bool Boxcat::SynchronizeDirectory(TitleIDVersion title, std::string name,
ProgressServiceBackend& progress) {
is_syncing.exchange(true);
std::thread(
[this, title, name, &progress] { SynchronizeInternal(dir_getter, title, progress, name); })
std::thread([this, title, name, &progress] {
SynchronizeInternal(applet_manager, dir_getter, title, progress, name);
})
.detach();
return true;
}
@@ -420,7 +429,7 @@ std::optional<std::vector<u8>> Boxcat::GetLaunchParameter(TitleIDVersion title)
FileUtil::Delete(path);
}
HandleDownloadDisplayResult(res);
HandleDownloadDisplayResult(applet_manager, res);
return std::nullopt;
}
}
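The Boxcat::Synchronize changes above keep the same launch pattern, a detached std::thread running a capturing lambda, while threading the extra applet_manager argument through to SynchronizeInternal. A minimal sketch of that launch pattern with a placeholder work function, not Boxcat's actual synchronization code:

#include <thread>

// Placeholder for the download/synchronization work.
void DoWork(int job_id) {
    (void)job_id;
}

void LaunchDetached(int job_id) {
    // Capture by value and detach, matching the shape of the lambdas in the hunk above.
    std::thread([job_id] { DoWork(job_id); }).detach();
}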

View File

@@ -9,6 +9,10 @@
#include <optional>
#include "core/hle/service/bcat/backend/backend.h"
namespace Service::AM::Applets {
class AppletManager;
}
namespace Service::BCAT {
struct EventStatus {
@@ -20,12 +24,13 @@ struct EventStatus {
/// Boxcat is yuzu's custom backend implementation of Nintendo's BCAT service. It is free to use and
/// doesn't require a Switch or Nintendo account. The content is controlled by the yuzu team.
class Boxcat final : public Backend {
friend void SynchronizeInternal(DirectoryGetter dir_getter, TitleIDVersion title,
friend void SynchronizeInternal(AM::Applets::AppletManager& applet_manager,
DirectoryGetter dir_getter, TitleIDVersion title,
ProgressServiceBackend& progress,
std::optional<std::string> dir_name);
public:
explicit Boxcat(DirectoryGetter getter);
explicit Boxcat(AM::Applets::AppletManager& applet_manager_, DirectoryGetter getter);
~Boxcat() override;
bool Synchronize(TitleIDVersion title, ProgressServiceBackend& progress) override;
@@ -53,6 +58,7 @@ private:
class Client;
std::unique_ptr<Client> client;
AM::Applets::AppletManager& applet_manager;
};
} // namespace Service::BCAT

View File

@@ -125,7 +125,11 @@ private:
class IBcatService final : public ServiceFramework<IBcatService> {
public:
explicit IBcatService(Core::System& system_, Backend& backend_)
: ServiceFramework("IBcatService"), system{system_}, backend{backend_} {
: ServiceFramework("IBcatService"), system{system_}, backend{backend_},
progress{{
ProgressServiceBackend{system_.Kernel(), "Normal"},
ProgressServiceBackend{system_.Kernel(), "Directory"},
}} {
// clang-format off
static const FunctionInfo functions[] = {
{10100, &IBcatService::RequestSyncDeliveryCache, "RequestSyncDeliveryCache"},
@@ -249,10 +253,7 @@ private:
Core::System& system;
Backend& backend;
std::array<ProgressServiceBackend, static_cast<std::size_t>(SyncType::Count)> progress{
ProgressServiceBackend{"Normal"},
ProgressServiceBackend{"Directory"},
};
std::array<ProgressServiceBackend, static_cast<std::size_t>(SyncType::Count)> progress;
};
void Module::Interface::CreateBcatService(Kernel::HLERequestContext& ctx) {
@@ -557,12 +558,12 @@ void Module::Interface::CreateDeliveryCacheStorageServiceWithApplicationId(
rb.PushIpcInterface<IDeliveryCacheStorageService>(fsc.GetBCATDirectory(title_id));
}
std::unique_ptr<Backend> CreateBackendFromSettings(DirectoryGetter getter) {
const auto backend = Settings::values.bcat_backend;
std::unique_ptr<Backend> CreateBackendFromSettings([[maybe_unused]] Core::System& system,
DirectoryGetter getter) {
#ifdef YUZU_ENABLE_BOXCAT
if (backend == "boxcat")
return std::make_unique<Boxcat>(std::move(getter));
if (Settings::values.bcat_backend == "boxcat") {
return std::make_unique<Boxcat>(system.GetAppletManager(), std::move(getter));
}
#endif
return std::make_unique<NullBackend>(std::move(getter));
@@ -571,7 +572,8 @@ std::unique_ptr<Backend> CreateBackendFromSettings(DirectoryGetter getter) {
Module::Interface::Interface(Core::System& system_, std::shared_ptr<Module> module_,
FileSystem::FileSystemController& fsc_, const char* name)
: ServiceFramework(name), fsc{fsc_}, module{std::move(module_)},
backend{CreateBackendFromSettings([&fsc_](u64 tid) { return fsc_.GetBCATDirectory(tid); })},
backend{CreateBackendFromSettings(system_,
[&fsc_](u64 tid) { return fsc_.GetBCATDirectory(tid); })},
system{system_} {}
Module::Interface::~Interface() = default;
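Because ProgressServiceBackend now needs the kernel, which is only available from the constructor's system_ parameter, the IBcatService progress array above moves from an in-class initializer to the constructor's member-initializer list. A minimal sketch of that pattern with hypothetical placeholder types:

#include <array>
#include <string>
#include <utility>

class ProgressSketch {
public:
    explicit ProgressSketch(std::string name) : name{std::move(name)} {}

private:
    std::string name;
};

class ServiceSketch {
public:
    // The array of non-default-constructible elements is initialized here,
    // mirroring the IBcatService constructor in the hunk above.
    ServiceSketch()
        : progress{{
              ProgressSketch{"Normal"},
              ProgressSketch{"Directory"},
          }} {}

private:
    std::array<ProgressSketch, 2> progress;
};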

View File

@@ -34,8 +34,7 @@ public:
RegisterHandlers(functions);
auto& kernel = system.Kernel();
register_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "BT:RegisterEvent");
register_event = Kernel::WritableEvent::CreateEventPair(kernel, "BT:RegisterEvent");
}
private:

View File

@@ -57,14 +57,12 @@ public:
RegisterHandlers(functions);
auto& kernel = system.Kernel();
scan_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic,
"IBtmUserCore:ScanEvent");
connection_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "IBtmUserCore:ConnectionEvent");
service_discovery = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "IBtmUserCore:Discovery");
config_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic,
"IBtmUserCore:ConfigEvent");
scan_event = Kernel::WritableEvent::CreateEventPair(kernel, "IBtmUserCore:ScanEvent");
connection_event =
Kernel::WritableEvent::CreateEventPair(kernel, "IBtmUserCore:ConnectionEvent");
service_discovery =
Kernel::WritableEvent::CreateEventPair(kernel, "IBtmUserCore:Discovery");
config_event = Kernel::WritableEvent::CreateEventPair(kernel, "IBtmUserCore:ConfigEvent");
}
private:

View File

@@ -40,7 +40,10 @@ static FileSys::VirtualDir GetDirectoryRelativeWrapped(FileSys::VirtualDir base,
if (dir_name.empty() || dir_name == "." || dir_name == "/" || dir_name == "\\")
return base;
return base->GetDirectoryRelative(dir_name);
const auto res = base->GetDirectoryRelative(dir_name);
if (res == nullptr)
return base->CreateDirectoryRelative(dir_name);
return res;
}
VfsDirectoryServiceWrapper::VfsDirectoryServiceWrapper(FileSys::VirtualDir backing_)
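The filesystem hunk above changes GetDirectoryRelativeWrapped to create the requested directory when the relative lookup returns null, i.e. a get-or-create helper. A minimal sketch of the same idea using std::filesystem as a stand-in for yuzu's VFS types:

#include <filesystem>

// Returns base/name, creating it first if it does not already exist.
std::filesystem::path GetOrCreateDirectory(const std::filesystem::path& base,
                                           const std::filesystem::path& name) {
    const auto target = base / name;
    if (!std::filesystem::exists(target)) {
        std::filesystem::create_directories(target);
    }
    return target;
}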

View File

@@ -162,7 +162,7 @@ public:
RegisterHandlers(functions);
notification_event = Kernel::WritableEvent::CreateEventPair(
system.Kernel(), Kernel::ResetType::Manual, "INotificationService:NotifyEvent");
system.Kernel(), "INotificationService:NotifyEvent");
}
private:

View File

@@ -174,7 +174,7 @@ void Controller_NPad::OnInit() {
auto& kernel = system.Kernel();
for (std::size_t i = 0; i < styleset_changed_events.size(); i++) {
styleset_changed_events[i] = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, fmt::format("npad:NpadStyleSetChanged_{}", i));
kernel, fmt::format("npad:NpadStyleSetChanged_{}", i));
}
if (!IsControllerActivated()) {
@@ -583,36 +583,6 @@ bool Controller_NPad::SwapNpadAssignment(u32 npad_id_1, u32 npad_id_2) {
return true;
}
bool Controller_NPad::IsControllerSupported(NPadControllerType controller) {
if (controller == NPadControllerType::Handheld) {
// Handheld is not even a supported type, let's stop here
if (std::find(supported_npad_id_types.begin(), supported_npad_id_types.end(),
NPAD_HANDHELD) == supported_npad_id_types.end()) {
return false;
}
// Handheld should not be supported in docked mode
if (Settings::values.use_docked_mode) {
return false;
}
}
switch (controller) {
case NPadControllerType::ProController:
return style.pro_controller;
case NPadControllerType::Handheld:
return style.handheld;
case NPadControllerType::JoyDual:
return style.joycon_dual;
case NPadControllerType::JoyLeft:
return style.joycon_left;
case NPadControllerType::JoyRight:
return style.joycon_right;
case NPadControllerType::Pokeball:
return style.pokeball;
default:
return false;
}
}
Controller_NPad::LedPattern Controller_NPad::GetLedPattern(u32 npad_id) {
if (npad_id == npad_id_list.back() || npad_id == npad_id_list[npad_id_list.size() - 2]) {
// These are controllers without led patterns
@@ -659,25 +629,24 @@ void Controller_NPad::ClearAllConnectedControllers() {
}
void Controller_NPad::DisconnectAllConnectedControllers() {
std::for_each(connected_controllers.begin(), connected_controllers.end(),
[](ControllerHolder& controller) { controller.is_connected = false; });
for (ControllerHolder& controller : connected_controllers) {
controller.is_connected = false;
}
}
void Controller_NPad::ConnectAllDisconnectedControllers() {
std::for_each(connected_controllers.begin(), connected_controllers.end(),
[](ControllerHolder& controller) {
if (controller.type != NPadControllerType::None && !controller.is_connected) {
controller.is_connected = false;
}
});
for (ControllerHolder& controller : connected_controllers) {
if (controller.type != NPadControllerType::None && !controller.is_connected) {
controller.is_connected = true;
}
}
}
void Controller_NPad::ClearAllControllers() {
std::for_each(connected_controllers.begin(), connected_controllers.end(),
[](ControllerHolder& controller) {
controller.type = NPadControllerType::None;
controller.is_connected = false;
});
for (ControllerHolder& controller : connected_controllers) {
controller.type = NPadControllerType::None;
controller.is_connected = false;
}
}
u32 Controller_NPad::GetAndResetPressState() {
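The Controller_NPad hunks above replace std::for_each calls that took mutating lambdas with plain range-based for loops; both forms visit every element, and the loop reads more directly. A minimal sketch showing the equivalence on a placeholder type, not yuzu's controller code:

#include <algorithm>
#include <array>

struct Holder {
    bool is_connected = true;
};

int main() {
    std::array<Holder, 4> holders{};

    // Algorithm form, as in the old code.
    std::for_each(holders.begin(), holders.end(),
                  [](Holder& holder) { holder.is_connected = false; });

    // Equivalent range-based for loop, as in the new code.
    for (Holder& holder : holders) {
        holder.is_connected = false;
    }
    return 0;
}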
@@ -685,10 +654,10 @@ u32 Controller_NPad::GetAndResetPressState() {
}
bool Controller_NPad::IsControllerSupported(NPadControllerType controller) const {
const bool support_handheld =
std::find(supported_npad_id_types.begin(), supported_npad_id_types.end(), NPAD_HANDHELD) !=
supported_npad_id_types.end();
if (controller == NPadControllerType::Handheld) {
const bool support_handheld =
std::find(supported_npad_id_types.begin(), supported_npad_id_types.end(),
NPAD_HANDHELD) != supported_npad_id_types.end();
// Handheld is not even a supported type, let's stop here
if (!support_handheld) {
return false;
@@ -700,6 +669,7 @@ bool Controller_NPad::IsControllerSupported(NPadControllerType controller) const
return true;
}
if (std::any_of(supported_npad_id_types.begin(), supported_npad_id_types.end(),
[](u32 npad_id) { return npad_id <= MAX_NPAD_ID; })) {
switch (controller) {
@@ -717,6 +687,7 @@ bool Controller_NPad::IsControllerSupported(NPadControllerType controller) const
return false;
}
}
return false;
}
@@ -795,6 +766,7 @@ Controller_NPad::NPadControllerType Controller_NPad::DecideBestController(
priority_list.push_back(NPadControllerType::JoyLeft);
priority_list.push_back(NPadControllerType::JoyRight);
priority_list.push_back(NPadControllerType::JoyDual);
break;
}
const auto iter = std::find_if(priority_list.begin(), priority_list.end(),

View File

@@ -301,6 +301,11 @@ private:
bool is_connected;
};
void InitNewlyAddedControler(std::size_t controller_idx);
bool IsControllerSupported(NPadControllerType controller) const;
NPadControllerType DecideBestController(NPadControllerType priority) const;
void RequestPadStateUpdate(u32 npad_id);
u32 press_state{};
NPadType style{};
@@ -321,12 +326,7 @@ private:
std::array<ControllerHolder, 10> connected_controllers{};
bool can_controllers_vibrate{true};
void InitNewlyAddedControler(std::size_t controller_idx);
bool IsControllerSupported(NPadControllerType controller) const;
NPadControllerType DecideBestController(NPadControllerType priority) const;
void RequestPadStateUpdate(u32 npad_id);
std::array<ControllerPad, 10> npad_pad_states{};
bool IsControllerSupported(NPadControllerType controller);
bool is_in_lr_assignment_mode{false};
Core::System& system;
};

View File

@@ -203,13 +203,13 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
{120, &Hid::SetNpadJoyHoldType, "SetNpadJoyHoldType"},
{121, &Hid::GetNpadJoyHoldType, "GetNpadJoyHoldType"},
{122, &Hid::SetNpadJoyAssignmentModeSingleByDefault, "SetNpadJoyAssignmentModeSingleByDefault"},
{123, nullptr, "SetNpadJoyAssignmentModeSingleByDefault"},
{123, &Hid::SetNpadJoyAssignmentModeSingle, "SetNpadJoyAssignmentModeSingle"},
{124, &Hid::SetNpadJoyAssignmentModeDual, "SetNpadJoyAssignmentModeDual"},
{125, &Hid::MergeSingleJoyAsDualJoy, "MergeSingleJoyAsDualJoy"},
{126, &Hid::StartLrAssignmentMode, "StartLrAssignmentMode"},
{127, &Hid::StopLrAssignmentMode, "StopLrAssignmentMode"},
{128, &Hid::SetNpadHandheldActivationMode, "SetNpadHandheldActivationMode"},
{129, nullptr, "GetNpadHandheldActivationMode"},
{129, &Hid::GetNpadHandheldActivationMode, "GetNpadHandheldActivationMode"},
{130, &Hid::SwapNpadAssignment, "SwapNpadAssignment"},
{131, nullptr, "IsUnintendedHomeButtonInputProtectionEnabled"},
{132, nullptr, "EnableUnintendedHomeButtonInputProtection"},
@@ -557,10 +557,126 @@ void Hid::SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx
LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}", npad_id,
applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.SetNpadMode(npad_id, Controller_NPad::NPadAssignments::Single);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::SetNpadJoyAssignmentModeSingle(Kernel::HLERequestContext& ctx) {
// TODO: Check the differences between this and SetNpadJoyAssignmentModeSingleByDefault
IPC::RequestParser rp{ctx};
const auto npad_id{rp.Pop<u32>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
const auto npad_joy_device_type{rp.Pop<u64>()};
LOG_WARNING(Service_HID,
"(STUBBED) called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
npad_id, applet_resource_user_id, npad_joy_device_type);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.SetNpadMode(npad_id, Controller_NPad::NPadAssignments::Single);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto npad_id{rp.Pop<u32>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}", npad_id,
applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.SetNpadMode(npad_id, Controller_NPad::NPadAssignments::Dual);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::MergeSingleJoyAsDualJoy(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto unknown_1{rp.Pop<u32>()};
const auto unknown_2{rp.Pop<u32>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(Service_HID,
"(STUBBED) called, unknown_1={}, unknown_2={}, applet_resource_user_id={}",
unknown_1, unknown_2, applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::StartLrAssignmentMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.StartLRAssignmentMode();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::StopLrAssignmentMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.StopLRAssignmentMode();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
const auto mode{rp.Pop<u64>()};
LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}, mode={}",
applet_resource_user_id, mode);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::GetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::SwapNpadAssignment(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto npad_1{rp.Pop<u32>()};
const auto npad_2{rp.Pop<u32>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, npad_1={}, npad_2={}",
applet_resource_user_id, npad_1, npad_2);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
IPC::ResponseBuilder rb{ctx, 2};
if (controller.SwapNpadAssignment(npad_1, npad_2)) {
rb.Push(RESULT_SUCCESS);
} else {
LOG_ERROR(Service_HID, "Npads are not connected!");
rb.Push(ERR_NPAD_NOT_CONNECTED);
}
}
void Hid::BeginPermitVibrationSession(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
@@ -635,47 +751,6 @@ void Hid::GetActualVibrationValue(Kernel::HLERequestContext& ctx) {
applet_resource->GetController<Controller_NPad>(HidController::NPad).GetLastVibration());
}
void Hid::SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto npad_id{rp.Pop<u32>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}", npad_id,
applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.SetNpadMode(npad_id, Controller_NPad::NPadAssignments::Dual);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::MergeSingleJoyAsDualJoy(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto unknown_1{rp.Pop<u32>()};
const auto unknown_2{rp.Pop<u32>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_WARNING(Service_HID,
"(STUBBED) called, unknown_1={}, unknown_2={}, applet_resource_user_id={}",
unknown_1, unknown_2, applet_resource_user_id);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
const auto mode{rp.Pop<u64>()};
LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}, mode={}",
applet_resource_user_id, mode);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_HID, "called");
@@ -769,49 +844,6 @@ void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
}
void Hid::StartLrAssignmentMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.StartLRAssignmentMode();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::StopLrAssignmentMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.StopLRAssignmentMode();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::SwapNpadAssignment(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto npad_1{rp.Pop<u32>()};
const auto npad_2{rp.Pop<u32>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, npad_1={}, npad_2={}",
applet_resource_user_id, npad_1, npad_2);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
IPC::ResponseBuilder rb{ctx, 2};
if (controller.SwapNpadAssignment(npad_1, npad_2)) {
rb.Push(RESULT_SUCCESS);
} else {
LOG_ERROR(Service_HID, "Npads are not connected!");
rb.Push(ERR_NPAD_NOT_CONNECTED);
}
}
class HidDbg final : public ServiceFramework<HidDbg> {
public:
explicit HidDbg() : ServiceFramework{"hid:dbg"} {

View File

@@ -106,14 +106,19 @@ private:
void SetNpadJoyHoldType(Kernel::HLERequestContext& ctx);
void GetNpadJoyHoldType(Kernel::HLERequestContext& ctx);
void SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx);
void SetNpadJoyAssignmentModeSingle(Kernel::HLERequestContext& ctx);
void SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx);
void MergeSingleJoyAsDualJoy(Kernel::HLERequestContext& ctx);
void StartLrAssignmentMode(Kernel::HLERequestContext& ctx);
void StopLrAssignmentMode(Kernel::HLERequestContext& ctx);
void SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx);
void GetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx);
void SwapNpadAssignment(Kernel::HLERequestContext& ctx);
void BeginPermitVibrationSession(Kernel::HLERequestContext& ctx);
void EndPermitVibrationSession(Kernel::HLERequestContext& ctx);
void SendVibrationValue(Kernel::HLERequestContext& ctx);
void SendVibrationValues(Kernel::HLERequestContext& ctx);
void GetActualVibrationValue(Kernel::HLERequestContext& ctx);
void SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx);
void MergeSingleJoyAsDualJoy(Kernel::HLERequestContext& ctx);
void SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx);
void GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx);
void CreateActiveVibrationDeviceList(Kernel::HLERequestContext& ctx);
void PermitVibration(Kernel::HLERequestContext& ctx);
@@ -123,9 +128,6 @@ private:
void StopSixAxisSensor(Kernel::HLERequestContext& ctx);
void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx);
void SetPalmaBoostMode(Kernel::HLERequestContext& ctx);
void StartLrAssignmentMode(Kernel::HLERequestContext& ctx);
void StopLrAssignmentMode(Kernel::HLERequestContext& ctx);
void SwapNpadAssignment(Kernel::HLERequestContext& ctx);
std::shared_ptr<IAppletResource> applet_resource;
Core::System& system;

View File

@@ -10,6 +10,8 @@
#include "core/hle/service/lbl/lbl.h"
#include "core/hle/service/service.h"
#include "core/hle/service/sm/sm.h"
#include "core/settings.h"
#include "video_core/renderer_base.h"
namespace Service::LBL {
@@ -18,21 +20,21 @@ public:
explicit LBL() : ServiceFramework{"lbl"} {
// clang-format off
static const FunctionInfo functions[] = {
{0, nullptr, "SaveCurrentSetting"},
{1, nullptr, "LoadCurrentSetting"},
{2, nullptr, "SetCurrentBrightnessSetting"},
{3, nullptr, "GetCurrentBrightnessSetting"},
{4, nullptr, "ApplyCurrentBrightnessSettingToBacklight"},
{5, nullptr, "GetBrightnessSettingAppliedToBacklight"},
{6, nullptr, "SwitchBacklightOn"},
{7, nullptr, "SwitchBacklightOff"},
{8, nullptr, "GetBacklightSwitchStatus"},
{9, nullptr, "EnableDimming"},
{10, nullptr, "DisableDimming"},
{11, nullptr, "IsDimmingEnabled"},
{12, nullptr, "EnableAutoBrightnessControl"},
{13, nullptr, "DisableAutoBrightnessControl"},
{14, nullptr, "IsAutoBrightnessControlEnabled"},
{0, &LBL::SaveCurrentSetting, "SaveCurrentSetting"},
{1, &LBL::LoadCurrentSetting, "LoadCurrentSetting"},
{2, &LBL::SetCurrentBrightnessSetting, "SetCurrentBrightnessSetting"},
{3, &LBL::GetCurrentBrightnessSetting, "GetCurrentBrightnessSetting"},
{4, &LBL::ApplyCurrentBrightnessSettingToBacklight, "ApplyCurrentBrightnessSettingToBacklight"},
{5, &LBL::GetBrightnessSettingAppliedToBacklight, "GetBrightnessSettingAppliedToBacklight"},
{6, &LBL::SwitchBacklightOn, "SwitchBacklightOn"},
{7, &LBL::SwitchBacklightOff, "SwitchBacklightOff"},
{8, &LBL::GetBacklightSwitchStatus, "GetBacklightSwitchStatus"},
{9, &LBL::EnableDimming, "EnableDimming"},
{10, &LBL::DisableDimming, "DisableDimming"},
{11, &LBL::IsDimmingEnabled, "IsDimmingEnabled"},
{12, &LBL::EnableAutoBrightnessControl, "EnableAutoBrightnessControl"},
{13, &LBL::DisableAutoBrightnessControl, "DisableAutoBrightnessControl"},
{14, &LBL::IsAutoBrightnessControlEnabled, "IsAutoBrightnessControlEnabled"},
{15, nullptr, "SetAmbientLightSensorValue"},
{16, nullptr, "GetAmbientLightSensorValue"},
{17, nullptr, "SetBrightnessReflectionDelayLevel"},
@@ -42,8 +44,8 @@ public:
{21, nullptr, "SetCurrentAmbientLightSensorMapping"},
{22, nullptr, "GetCurrentAmbientLightSensorMapping"},
{23, nullptr, "IsAmbientLightSensorAvailable"},
{24, nullptr, "SetCurrentBrightnessSettingForVrMode"},
{25, nullptr, "GetCurrentBrightnessSettingForVrMode"},
{24, &LBL::SetCurrentBrightnessSettingForVrMode, "SetCurrentBrightnessSettingForVrMode"},
{25, &LBL::GetCurrentBrightnessSettingForVrMode, "GetCurrentBrightnessSettingForVrMode"},
{26, &LBL::EnableVrMode, "EnableVrMode"},
{27, &LBL::DisableVrMode, "DisableVrMode"},
{28, &LBL::IsVrModeEnabled, "IsVrModeEnabled"},
@@ -53,13 +55,209 @@ public:
RegisterHandlers(functions);
}
void LoadFromSettings() {
current_brightness = Settings::values.backlight_brightness;
current_vr_mode_brightness = Settings::values.backlight_brightness;
if (auto_brightness_enabled) {
return;
}
if (vr_mode_enabled) {
Renderer().SetCurrentBrightness(current_vr_mode_brightness);
} else {
Renderer().SetCurrentBrightness(current_brightness);
}
}
private:
f32 GetAutoBrightnessValue() const {
return 0.5f;
}
VideoCore::RendererBase& Renderer() {
return Core::System::GetInstance().Renderer();
}
void SaveCurrentSetting(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
Settings::values.backlight_brightness = current_brightness;
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void LoadCurrentSetting(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
LoadFromSettings();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void SetCurrentBrightnessSetting(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto value = rp.PopRaw<f32>();
LOG_DEBUG(Service_LBL, "called, value={:.3f}", value);
current_brightness = std::clamp(value, 0.0f, 1.0f);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void GetCurrentBrightnessSetting(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(current_brightness);
}
void ApplyCurrentBrightnessSettingToBacklight(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
if (!auto_brightness_enabled) {
Renderer().SetCurrentBrightness(vr_mode_enabled ? current_vr_mode_brightness
: current_brightness);
}
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void GetBrightnessSettingAppliedToBacklight(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(Renderer().GetCurrentResultantBrightness());
}
void SwitchBacklightOn(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto fade_time = rp.PopRaw<u64>();
LOG_DEBUG(Service_LBL, "called, fade_time={:016X}", fade_time);
Renderer().SetBacklightStatus(true, fade_time);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void SwitchBacklightOff(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto fade_time = rp.PopRaw<u64>();
LOG_DEBUG(Service_LBL, "called, fade_time={:016X}", fade_time);
Renderer().SetBacklightStatus(false, fade_time);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void GetBacklightSwitchStatus(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u8>(Renderer().GetBacklightStatus());
}
void EnableDimming(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
dimming_enabled = true;
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void DisableDimming(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
dimming_enabled = false;
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void IsDimmingEnabled(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u8>(dimming_enabled);
}
void EnableAutoBrightnessControl(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
auto_brightness_enabled = true;
Renderer().SetCurrentBrightness(GetAutoBrightnessValue());
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void DisableAutoBrightnessControl(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
auto_brightness_enabled = false;
Renderer().SetCurrentBrightness(current_brightness);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void IsAutoBrightnessControlEnabled(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u8>(auto_brightness_enabled);
}
void SetCurrentBrightnessSettingForVrMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto value = rp.PopRaw<f32>();
LOG_DEBUG(Service_LBL, "called, value={:.3f}", value);
current_vr_mode_brightness = std::clamp(value, 0.0f, 1.0f);
if (vr_mode_enabled && !auto_brightness_enabled) {
Renderer().SetCurrentBrightness(value);
}
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void GetCurrentBrightnessSettingForVrMode(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(current_vr_mode_brightness);
}
void EnableVrMode(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_LBL, "called");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
if (!vr_mode_enabled && !auto_brightness_enabled &&
current_brightness != current_vr_mode_brightness) {
Renderer().SetCurrentBrightness(current_vr_mode_brightness);
}
vr_mode_enabled = true;
}
@@ -69,6 +267,11 @@ private:
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
if (vr_mode_enabled && !auto_brightness_enabled &&
current_brightness != current_vr_mode_brightness) {
Renderer().SetCurrentBrightness(current_brightness);
}
vr_mode_enabled = false;
}
@@ -80,9 +283,27 @@ private:
rb.Push(vr_mode_enabled);
}
bool auto_brightness_enabled = false;
bool dimming_enabled = true;
f32 current_brightness = GetAutoBrightnessValue();
f32 current_vr_mode_brightness = GetAutoBrightnessValue();
bool vr_mode_enabled = false;
};
void RequestLoadCurrentSetting(SM::ServiceManager& sm) {
if (&sm == nullptr) {
return;
}
const auto lbl = sm.GetService<LBL>("lbl");
if (lbl) {
lbl->LoadFromSettings();
}
}
void InstallInterfaces(SM::ServiceManager& sm) {
std::make_shared<LBL>()->InstallAsService(sm);
}
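The new LBL brightness setters above clamp the incoming f32 to the [0.0, 1.0] range with std::clamp before storing or applying it. A minimal sketch of that guard as a free function, hypothetical and not part of the LBL service itself:

#include <algorithm>

// Clamp an IPC-supplied brightness value into the valid range.
float ClampBrightness(float value) {
    return std::clamp(value, 0.0f, 1.0f);
}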

View File

@@ -10,6 +10,9 @@ class ServiceManager;
namespace Service::LBL {
// Requests the LBL service registered with the passed ServiceManager to load brightness values from Settings
void RequestLoadCurrentSetting(SM::ServiceManager& sm);
void InstallInterfaces(SM::ServiceManager& sm);
} // namespace Service::LBL

View File

@@ -26,8 +26,7 @@ constexpr ResultCode ERR_NO_APPLICATION_AREA(ErrorModule::NFP, 152);
Module::Interface::Interface(std::shared_ptr<Module> module, Core::System& system, const char* name)
: ServiceFramework(name), module(std::move(module)), system(system) {
auto& kernel = system.Kernel();
nfc_tag_load = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic,
"IUser:NFCTagDetected");
nfc_tag_load = Kernel::WritableEvent::CreateEventPair(kernel, "IUser:NFCTagDetected");
}
Module::Interface::~Interface() = default;
@@ -66,10 +65,9 @@ public:
RegisterHandlers(functions);
auto& kernel = system.Kernel();
deactivate_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "IUser:DeactivateEvent");
availability_change_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "IUser:AvailabilityChangeEvent");
deactivate_event = Kernel::WritableEvent::CreateEventPair(kernel, "IUser:DeactivateEvent");
availability_change_event =
Kernel::WritableEvent::CreateEventPair(kernel, "IUser:AvailabilityChangeEvent");
}
private:

View File

@@ -9,6 +9,7 @@
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/nifm/nifm.h"
#include "core/hle/service/service.h"
#include "core/settings.h"
namespace Service::NIFM {
@@ -69,10 +70,8 @@ public:
RegisterHandlers(functions);
auto& kernel = system.Kernel();
event1 = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic,
"IRequest:Event1");
event2 = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic,
"IRequest:Event2");
event1 = Kernel::WritableEvent::CreateEventPair(kernel, "IRequest:Event1");
event2 = Kernel::WritableEvent::CreateEventPair(kernel, "IRequest:Event2");
}
private:
@@ -88,7 +87,12 @@ private:
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.PushEnum(RequestState::Connected);
if (Settings::values.bcat_backend == "none") {
rb.PushEnum(RequestState::NotSubmitted);
} else {
rb.PushEnum(RequestState::Connected);
}
}
void GetResult(Kernel::HLERequestContext& ctx) {
@@ -196,14 +200,22 @@ private:
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u8>(1);
if (Settings::values.bcat_backend == "none") {
rb.Push<u8>(0);
} else {
rb.Push<u8>(1);
}
}
void IsAnyInternetRequestAccepted(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_NIFM, "(STUBBED) called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u8>(1);
if (Settings::values.bcat_backend == "none") {
rb.Push<u8>(0);
} else {
rb.Push<u8>(1);
}
}
Core::System& system;
};

Some files were not shown because too many files have changed in this diff.