Compare commits

...

297 Commits

Author SHA1 Message Date
Fernando Sahmkow
febb88efc4 Common/Alignment: Add noexcept where required. 2019-07-19 21:49:54 -04:00
Fernando Sahmkow
024b5fe91a Kernel: Address Feedback 2019-07-19 11:28:57 -04:00
Fernando Sahmkow
0901c33753 Common: Correct alignment allocator to work on C++14 or higher. 2019-07-19 11:11:42 -04:00
Fernando Sahmkow
9bede4eeed VM_Manager: Align allocated memory to 256bytes
This commit ensures that all backing memory allocated for the Guest CPU
is aligned to 256 bytes. This is due to how GPU memory works and the heavy
constraints it places on the alignment of physical memory.
2019-07-19 10:06:08 -04:00
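
For illustration of the alignment arithmetic the commit above relies on (a simplified stand-in, not yuzu's actual Common::AlignUp helper), rounding a size or address up to a 256-byte boundary looks like this in C++:

    #include <cstdint>

    // Round value up to the next multiple of align; align must be a power of two.
    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    static_assert(AlignUp(0x1234, 256) == 0x1300, "rounds up to the next 256-byte boundary");
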
bunnei
5d369112d9 Merge pull request #2687 from lioncash/tls-process
kernel/process: Allocate the process' TLS region during initialization
2019-07-18 13:53:04 -04:00
bunnei
63bda67a34 Merge pull request #2738 from lioncash/shader-ir
shader-ir: Minor cleanup-related changes
2019-07-18 13:52:01 -04:00
David
d4b95bfc25 Merge pull request #2741 from FernandoS27/trace-log
Kernel: Downgrade WaitForAddress and SignalToAddress messages to Trace.
2019-07-18 13:58:29 +10:00
Fernando Sahmkow
5e457bf258 Kernel: Downgrade WaitForAddress and SignalToAddress messages to Trace.
These messages were originally set to warning since few games used these
svcs and they were needed for debugging. This is no longer the case.
2019-07-17 22:05:47 -04:00
Fernando Sahmkow
223a535f3f Merge pull request #2740 from lioncash/bra
shader/decode/other: Correct branch indirect argument within BRA handling
2019-07-17 14:25:08 -04:00
Rodrigo Locatti
c3218c110f Merge pull request #2726 from lioncash/access
core: Remove CurrentArmInterface() global accessor
2019-07-17 03:42:16 -03:00
Lioncash
bebbdc2067 shader_ir: std::move Node instance where applicable
These are std::shared_ptr instances underneath the hood, which means
copying them isn't as cheap as a regular pointer. Particularly so on
weakly-ordered systems.

This avoids atomic reference count increments and decrements where they
aren't necessary for the core set of operations.
2019-07-16 19:49:23 -04:00
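
As a generic, self-contained illustration of the point above (not the shader IR code itself), copying a std::shared_ptr bumps its atomic reference count while moving it does not:

    #include <memory>
    #include <utility>
    #include <vector>

    struct Node {};
    using NodePtr = std::shared_ptr<Node>;

    void StoreCopy(std::vector<NodePtr>& list, NodePtr node) {
        list.push_back(node);            // copy: atomic ref-count increment/decrement
    }

    void StoreMove(std::vector<NodePtr>& list, NodePtr node) {
        list.push_back(std::move(node)); // move: no atomic ref-count traffic
    }
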
Lioncash
60926ac16b shader_ir: Rename Get/SetTemporal to Get/SetTemporary
This is more accurate in terms of describing what the functions are
actually doing. Temporal relates to time, not the setting of a temporary
itself.
2019-07-16 19:47:43 -04:00
Lioncash
44d87ff641 shader_ir: Remove unused includes
Removes unnecessary header dependencies.
2019-07-16 19:47:42 -04:00
Fernando Sahmkow
b56e7f870a Merge pull request #2565 from ReinUsesLisp/track-indirect
shader/track: Track indirect buffers
2019-07-16 14:58:35 -04:00
Lioncash
e2d7dda166 shader/decode/other: Correct branch indirect argument within BRA handling
This appears to have been a copy/paste error introduced within
8a6fc529a9
2019-07-16 12:20:45 -04:00
Fernando Sahmkow
1bdb59fc6e Merge pull request #2695 from ReinUsesLisp/layer-viewport
gl_shader_decompiler: Implement gl_ViewportIndex and gl_Layer in vertex shaders
2019-07-15 16:28:07 -04:00
bunnei
b77a1ed67a Merge pull request #2705 from FernandoS27/tex-cache-fixes
GPU: Fixes to Texture Cache and Include Microprofiles for GL State/BufferCopy/Macro Interpreter
2019-07-14 22:44:36 -04:00
ReinUsesLisp
afa8096df5 shader: Allow tracking of indirect buffers without variable offset
While changing this code, simplify tracking code to allow returning
the base address node, this way callers don't have to manually rebuild
it on each invocation.
2019-07-14 22:36:44 -03:00
bunnei
3477b92289 Merge pull request #2675 from ReinUsesLisp/opengl-buffer-cache
buffer_cache: Implement a generic buffer cache and its OpenGL backend
2019-07-14 19:03:43 -04:00
Fernando Sahmkow
2ac7472d3f Texture_Cache: Address Feedback 2019-07-14 17:42:39 -04:00
Fernando Sahmkow
0f54b541f4 Texture_Cache: Remove some unprecise fallback case and clang format 2019-07-14 12:00:32 -04:00
Fernando Sahmkow
5818959e54 Texture_Cache: Force Framebuffer reset if an active render target is unregistered. 2019-07-14 12:00:31 -04:00
Fernando Sahmkow
913b7a6872 GPU: Add a microprofile for macro interpreter 2019-07-14 12:00:30 -04:00
Fernando Sahmkow
a9943222f2 GL_State: Add a microprofile timer to OpenGL state. 2019-07-14 12:00:30 -04:00
Fernando Sahmkow
5c1e1a148e Gl_Texture_Cache: Measure Buffer Copy Times 2019-07-14 12:00:29 -04:00
Fernando Sahmkow
5d31bab69a Texture_Cache: Correct Linear Structural Match. 2019-07-14 12:00:28 -04:00
Fernando Sahmkow
4882c058fd Merge pull request #2690 from SciresM/physmem_fixes
Implement MapPhysicalMemory/UnmapPhysicalMemory
2019-07-14 09:16:46 -04:00
Fernando Sahmkow
0ec9da2f9f Merge pull request #2692 from ReinUsesLisp/tlds-f16
shader/texture: Add F16 support for TLDS
2019-07-14 08:44:38 -04:00
Flame Sage
b9e1db1312 Merge pull request #2730 from DarkLordZach/master
Finalize Azure Pipelines Definitions
2019-07-13 21:35:37 -04:00
Zach Hilman
bbc5b5d62d Finalize Azure Pipelines Definitions
2019-07-13 21:34:40 -04:00
Lioncash
093e5440e2 core: Remove CurrentArmInterface() global accessor
Replaces the final usage of the global accessor function and removes it.
Removes one more enabler of global state.
2019-07-12 21:48:49 -04:00
Zach Hilman
4d82158274 Merge pull request #2725 from ogniK5377/mult-audbuffer
"AudioRenderer" thread should have a unique name
2019-07-12 16:41:17 -04:00
David Marcec
ea5602b959 Clang format 2019-07-13 01:49:32 +10:00
David Marcec
31fe859fe5 Addressed issues 2019-07-13 01:35:40 +10:00
David Marcec
73b37886c1 "AudioRenderer" thread should have a unique name
Creating multiple "AudioRenderer" threads caused the previous thread to be overwritten. The threads are now named AudioRenderer-InstanceX, where X is the current instance number.
2019-07-13 01:22:08 +10:00
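
A hypothetical sketch of how such a unique name could be produced (the instance_number counter is illustrative, not the actual member used by the audio service):

    #include <cstddef>
    #include <string>

    // Illustrative only: derive a unique thread name from a per-instance counter.
    std::string MakeAudioRendererThreadName(std::size_t instance_number) {
        return "AudioRenderer-Instance" + std::to_string(instance_number);
    }
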
Michael Scire
d4fc560c05 Remove unicorn mappings/unmappings 2019-07-11 15:12:33 -07:00
bunnei
bb67091c77 Merge pull request #2609 from FernandoS27/new-scan
Implement a New Shader Scanner, Decompile Flow Stack and implement BRX BRA.CC
2019-07-11 17:36:23 -04:00
ReinUsesLisp
0eb0c24269 gl_shader_decompiler: Fix gl_PointSize redeclaration 2019-07-11 16:10:59 -03:00
bunnei
79c382fafd Merge pull request #2717 from SciresM/unmirror_memory
Restore memory perms on svcUnmapMemory/UnloadNro
2019-07-11 14:57:20 -04:00
bunnei
521fb325aa Merge pull request #2723 from lioncash/mem
core/arm: Remove obsolete Unicorn memory mapping
2019-07-11 14:56:26 -04:00
bunnei
2a94745500 Merge pull request #2724 from lioncash/sleep
service/am: Implement SetAutoSleepDisabled/IsAutoSleepDisabled
2019-07-11 14:56:06 -04:00
Lioncash
f4ae449f73 service/am: Implement IsAutoSleepDisabled
This simply queries whether or not auto-sleep facilities are disabled
and has no special handling. It's a basic getter function.
2019-07-11 13:34:55 -04:00
Lioncash
b81f6f67f5 service/am: Implement SetAutoSleepDisabled
Provides a basic implementation of SetAutoSleepDisabled. Until idle
handling is implemented, this is about the best we can do.

In the meantime, provide rough documentation of the specifics that occur
when this function is called on actual hardware.
2019-07-11 13:09:03 -04:00
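
A hedged sketch of what such basic setter/getter commands look like in yuzu's HLE IPC style; the is_auto_sleep_disabled member and the exact response layout are assumptions for illustration, not the actual implementation:

    // Sketch only; assumes yuzu's IPC helpers and a bool member on ISelfController.
    void ISelfController::SetAutoSleepDisabled(Kernel::HLERequestContext& ctx) {
        IPC::RequestParser rp{ctx};
        is_auto_sleep_disabled = rp.Pop<bool>();

        IPC::ResponseBuilder rb{ctx, 2};
        rb.Push(RESULT_SUCCESS);
    }

    void ISelfController::IsAutoSleepDisabled(Kernel::HLERequestContext& ctx) {
        IPC::ResponseBuilder rb{ctx, 3};
        rb.Push(RESULT_SUCCESS);
        rb.Push(is_auto_sleep_disabled);
    }
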
Lioncash
8fc806e88a yuzu: Remove setting for using Unicorn
The JIT is mature enough that this setting can be removed, falling back
to Unicorn only on unsupported architectures. Any missing features from
Unicorn (of which there are extremely few) are mostly
developer-oriented, which most users don't care about.

Features should be coordinated with the JIT, not the interpreter,
anyhow.
2019-07-11 05:59:13 -04:00
Lioncash
70624e1c1d core/arm: Remove obsolete Unicorn memory mapping
This was initially necessary when AArch64 JIT emulation was in its
infancy and not all memory-related instructions were implemented.

Given the JIT now has all of these facilities implemented, we can remove
these functions from the CPU interface.
2019-07-11 05:35:46 -04:00
Michael Scire
072a9796f5 Restore memory perms on svcUnmapMemory/UnloadNro
Prior to this PR, Yuzu did not restore memory to RW-
on unmap of mirrored memory or unloading of NRO.

(In fact, in the NRO case, the memory was unmapped
instead of reprotected to --- on Load, so it was
actually lost entirely...)

This PR addresses that, and restores memory to RW-
as it should.

This fixes a crash in Super Smash Bros when creating
a World of Light save for the first time, and possibly
other games/circumstances.
2019-07-11 01:38:28 -07:00
ReinUsesLisp
aca40de224 gl_shader_decompiler: Fix conditional usage of GL_ARB_shader_viewport_layer_array 2019-07-11 04:27:00 -03:00
Flame Sage
0b3901bdd0 Merge pull request #2714 from DarkLordZach/repo-sync-pipeline
Add Repository Sync Pipeline
2019-07-10 22:22:04 -04:00
Zach Hilman
502358ab05 Add Repository Sync Pipeline 2019-07-10 22:18:32 -04:00
bunnei
fd066ffbce Merge pull request #2697 from lioncash/doc
gl_rasterizer: Amend documentation comment for ConfigureFramebuffers()
2019-07-10 16:38:09 -04:00
bunnei
7fb7054bc8 Merge pull request #2686 from ReinUsesLisp/vk-scheduler
vk_scheduler: Drop execution context in favor of views
2019-07-10 16:35:48 -04:00
bunnei
93eaea109d Merge pull request #2700 from ogniK5377/GetFriendList
IFriendService::GetFriendList
2019-07-10 16:29:48 -04:00
bunnei
463af08bed Merge pull request #2611 from DarkLordZach/pm-info-cmd
pm: Implement various pm commands for finding process and title IDs
2019-07-10 16:28:29 -04:00
bunnei
d707a12b9a Merge pull request #2650 from DarkLordZach/mii-iface-ver
mii: Implement IDatabaseService SetInterfaceVersion
2019-07-10 16:26:23 -04:00
bunnei
206ec29f17 Merge pull request #2691 from lioncash/override
video_core: Add missing override specifiers
2019-07-10 16:25:43 -04:00
Flame Sage
55245b6183 Merge pull request #2706 from DarkLordZach/azure-1
Add Pipeline Definitions for Azure CI
2019-07-09 22:22:21 -04:00
Zach Hilman
f2e5c19520 Add Pipeline Definitions 2019-07-09 22:17:56 -04:00
Flame Sage
05d55b0fd7 Set up CI with Azure Pipelines
[skip ci]
2019-07-09 22:01:46 -04:00
Fernando Sahmkow
f2549739d1 shader_ir: Add comments on missing instructions.
Also documents Nvidia's address space in the comments.
2019-07-09 17:15:45 -04:00
Michael Scire
a1845d1dd3 prefer system reference over global accessor 2019-07-09 08:11:35 -07:00
Fernando Sahmkow
2de7649311 shader_ir: limit exploration to the best known program size. 2019-07-09 08:14:43 -04:00
Fernando Sahmkow
e7c6045a03 control_flow: Correct block breaking algorithm. 2019-07-09 08:14:43 -04:00
Fernando Sahmkow
dc4a93594c control_flow: Assert shaders bigger than limit. 2019-07-09 08:14:42 -04:00
Fernando Sahmkow
e7a88f0ab3 control_flow: Address feedback. 2019-07-09 08:14:42 -04:00
Fernando Sahmkow
34357b110c shader_ir: Correct parsing of scheduling instructions and correct sizing 2019-07-09 08:14:41 -04:00
Fernando Sahmkow
cfb3db1a32 shader_ir: Correct max sizing 2019-07-09 08:14:40 -04:00
Fernando Sahmkow
d45fed3030 shader_ir: Remove unnecessary constructors and use optional for ScanFlow result 2019-07-09 08:14:40 -04:00
Fernando Sahmkow
01b21ee1e8 shader_ir: Corrections, documenting and asserting control_flow 2019-07-09 08:14:39 -04:00
Fernando Sahmkow
d5533b440c shader_ir: Unify blocks in decompiled shaders. 2019-07-09 08:14:39 -04:00
Fernando Sahmkow
926b80102f shader_ir: Decompile Flow Stack 2019-07-09 08:14:38 -04:00
Fernando Sahmkow
459fce3a8f shader_ir: propagate shader size to the IR 2019-07-09 08:14:37 -04:00
Fernando Sahmkow
8a6fc529a9 shader_ir: Implement BRX & BRA.CC 2019-07-09 08:14:37 -04:00
Fernando Sahmkow
c218ae4b02 shader_ir: Remove the old scanner. 2019-07-09 08:14:36 -04:00
Fernando Sahmkow
8af6e6a052 shader_ir: Implement a new shader scanner 2019-07-09 08:14:36 -04:00
David Marcec
0330f5d6f8 IFriendService::GetFriendList
We don't have any friends implemented in Yuzu yet, so it doesn't make sense to return any friends. For now we'll be returning 0 friends; however, the information provided will allow a proper implementation of this command when needed.
2019-07-09 18:20:58 +10:00
Lioncash
c04785c928 gl_rasterizer: Amend documentation comment for ConfigureFramebuffers()
must_reconfigure isn't a parameter for this function any more, so it can
be replaced with current_state.

While we're at it, we can make the parameters of the declaration match
the same name as the ones in the definition.
2019-07-09 02:08:15 -04:00
Michael Scire
697206092e Prevent merging of device mapped memory blocks.
This sets the DeviceMapped attribute for GPU-mapped memory blocks,
and prevents merging device mapped blocks. This prevents memory
mapped from the gpu from having its backing address changed by
block coalesce.
2019-07-08 22:52:05 -07:00
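
A self-contained sketch of the rule described above (the types are illustrative stand-ins, not yuzu's actual memory block classes): adjacent blocks may only coalesce when neither carries the device-mapped attribute:

    #include <cstdint>

    struct MemoryBlock {
        std::uint64_t address;
        std::uint64_t size;
        bool device_mapped; // set for GPU-mapped memory
    };

    // Blocks merge only if they are adjacent and neither is device mapped.
    bool CanCoalesce(const MemoryBlock& a, const MemoryBlock& b) {
        return a.address + a.size == b.address && !a.device_mapped && !b.device_mapped;
    }
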
Zach Hilman
618d8446ab Merge pull request #2661 from ogniK5377/audren-loop
audren: Only manage wave buffers with a size
2019-07-08 09:35:42 -04:00
Zach Hilman
6c3cceafdc Merge pull request #2657 from ogniK5377/npad-assignments
hid:StartLrAssignmentMode, hid:StopLrAssignmentMode, hid:SwapNpadAssignment
2019-07-08 09:35:19 -04:00
David Marcec
5234e08a0d addressed issues 2019-07-08 14:51:40 +10:00
David Marcec
e3d000a7e6 addressed issue 2019-07-08 14:49:16 +10:00
bunnei
7b28f954c9 Merge pull request #2651 from DarkLordZach/apm-boost-mode-1
apm: Initial implementation of performance config and boost mode
2019-07-07 21:40:30 -04:00
bunnei
8f5aae3074 Merge pull request #2642 from DarkLordZach/fsp-log-2
fsp-srv: Implement Access Logging Functionality
2019-07-07 21:39:40 -04:00
ReinUsesLisp
c9d886c84e gl_shader_decompiler: Implement gl_ViewportIndex and gl_Layer in vertex shaders
This commit implements gl_ViewportIndex and gl_Layer in vertex and
geometry shaders. In the case it's used in a vertex shader, it requires
ARB_shader_viewport_layer_array. This extension is available on AMD and
Nvidia devices (mesa and proprietary drivers), but not available on
Intel on any platform. At the moment of writing this description I don't
know if this is a hardware limitation or a driver limitation.

In the case that ARB_shader_viewport_layer_array is not available,
writes to these registers on a vertex shader are ignored, with the
appropriate logging.
2019-07-07 20:42:55 -03:00
Michael Scire
ca6f08e3b1 Remove unused member function declaration 2019-07-07 13:02:41 -07:00
Michael Scire
ce64a9fab9 physmem: add helpers, cleanup logic. 2019-07-07 12:55:30 -07:00
Hexagon12
8070cb3f6b Merge pull request #2694 from FearlessTobi/patch-1
Delete decode_integer_set.cpp
2019-07-07 22:43:45 +03:00
Tobias
be020f7621 Delete decode_integer_set.cpp 2019-07-07 21:40:33 +02:00
Michael Scire
b901cd584e clang-format fixes 2019-07-07 12:08:29 -07:00
ReinUsesLisp
d0966b9f7c shader/texture: Add F16 support for TLDS 2019-07-07 16:05:56 -03:00
Michael Scire
1689784c19 address review commentary 2019-07-07 11:48:11 -07:00
Michael Scire
13a8fde3ad Implement MapPhysicalMemory/UnmapPhysicalMemory
This implements svcMapPhysicalMemory/svcUnmapPhysicalMemory for Yuzu,
which can be used to map memory at a desired address by games since
3.0.0.

It also properly parses SystemResourceSize from NPDM, and makes
information available via svcGetInfo.

This is needed for games like Super Smash Bros. and Diablo 3 -- this
PR's implementation does not run into the "ASCII reads" issue mentioned
in the comments of #2626, which was caused by the following bugs in
Yuzu's memory management that this PR also addresses:
* Yuzu's memory coalescing does not properly merge blocks. This results
  in a polluted address space/svcQueryMemory results that would be
  impossible to replicate on hardware, which can lead to game code making
  the wrong assumptions about memory layout.
  * This implements better merging for AllocatedMemoryBlocks.
* Yuzu's implementation of svcMirrorMemory unprotected the entire
  virtual memory range containing the range being mirrored. This could
  lead to games attempting to map data at that unprotected
  range/attempting to access that range after yuzu improperly unmapped
  it.
  * This PR fixes it by simply calling ReprotectRange instead of
    Reprotect.
2019-07-07 11:45:53 -07:00
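
As a self-contained sketch of the block-coalescing idea from the first bullet (illustrative types, not yuzu's VMManager structures), a newly inserted block absorbs its physically adjacent neighbours so that address-space queries see one contiguous region:

    #include <cstdint>
    #include <iterator>
    #include <map>

    struct Block {
        std::uint64_t begin;
        std::uint64_t end;
    };

    // Insert a block keyed by its start address and merge it with adjacent neighbours.
    void InsertAndCoalesce(std::map<std::uint64_t, Block>& blocks, Block block) {
        auto next = blocks.lower_bound(block.begin);
        if (next != blocks.end() && next->second.begin == block.end) {
            block.end = next->second.end;         // absorb the following block
            next = blocks.erase(next);
        }
        if (next != blocks.begin()) {
            const auto prev = std::prev(next);
            if (prev->second.end == block.begin) {
                block.begin = prev->second.begin; // absorb the preceding block
                blocks.erase(prev);
            }
        }
        blocks.emplace(block.begin, block);
    }
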
Lioncash
56c7912159 kernel/process: Allocate the process' TLS region during initialization
Prior to execution within a process beginning, the process establishes
its own TLS region for uses (as far as I can tell) related to exception
handling.

Now that TLS creation was decoupled from threads themselves, we can add
this behavior to our Process class. This is also good, as it allows us
to remove a stub within svcGetInfo, namely querying the address of that
region.
2019-07-07 14:08:28 -04:00
Lioncash
eb6f55d880 kernel/process: Move main thread stack allocation to its own function
Keeps this particular set of behavior isolated to its own function.
2019-07-07 14:08:25 -04:00
Lioncash
cbdd6cd1c0 vk_sampler_cache: Remove unused includes
These are no longer used within this header, so they can be removed.
2019-07-07 13:40:36 -04:00
Lioncash
4b27680639 video_core: Add missing override specifiers 2019-07-07 13:38:39 -04:00
ReinUsesLisp
86a874a2fc vk_scheduler: Drop execution context in favor of views
Instead of passing an execution context by copy throughout the whole
Vulkan call hierarchy, use a command buffer view and fence view
approach.

These internally dereference the command buffer or fence, forcing the
user to be unable to use an outdated version of it in normal usage.
It is still possible to store an outdated one if it is cast to
VKFence& or vk::CommandBuffer.

While changing this file, add an extra parameter for Flush and Finish to
allow releasing the fence from these calls.
2019-07-07 03:30:22 -03:00
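
A reduced, self-contained sketch of the view idea described above (the class names are illustrative, not yuzu's actual Vulkan types): a view keeps a pointer back to its owner and resolves to the owner's current object on every use, so a stale handle cannot be kept around by accident:

    struct CommandBuffer {
        int id = 0;
    };

    class Scheduler {
    public:
        class CommandBufferView {
        public:
            explicit CommandBufferView(Scheduler& owner) : owner{&owner} {}
            // Dereferencing always yields the scheduler's *current* command buffer.
            CommandBuffer& operator*() const { return owner->current; }
            CommandBuffer* operator->() const { return &owner->current; }

        private:
            Scheduler* owner;
        };

        CommandBufferView GetCommandBuffer() { return CommandBufferView{*this}; }
        void BeginNewCommandBuffer() { current = CommandBuffer{current.id + 1}; }

    private:
        CommandBuffer current;
    };
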
Zach Hilman
a4ef86a021 mii: Implement IDatabaseService SetInterfaceVersion
Appears to set a member variable used to affect the API that games access, and the method used to store data.
2019-07-06 21:39:12 -04:00
Zach Hilman
9e689a81f8 Merge pull request #2674 from lioncash/reporter
core/reporter: Minor changes
2019-07-06 21:26:40 -04:00
Zach Hilman
fb9124b6cd Merge pull request #2677 from lioncash/assert
kernel/vm_manager: Handle stack/TLS IO region placement a little better
2019-07-06 21:25:27 -04:00
Zach Hilman
f732cd5a4b Merge pull request #2684 from SciresM/suspend_tick
am: Implement GetAccumulatedSuspendedTickValue
2019-07-06 21:19:20 -04:00
Michael Scire
36259c01c2 clang-format fixes 2019-07-06 13:52:05 -07:00
Michael Scire
7fb7d3c218 am: Implement GetAccumulatedSuspendedTickValue 2019-07-06 12:13:34 -07:00
Lioncash
65c748fbd3 memory: Remove unused includes
These aren't used within the central memory management code, so they can
be removed.
2019-07-06 02:24:34 -04:00
Lioncash
63a5f48e7e memory: Remove unused PageTable forward declaration
This isn't used by anything in the header file, so it can be removed.
2019-07-06 02:24:34 -04:00
Lioncash
2a9e388290 kernel/vm_manager: Rename 'new map' to 'stack'
Provides a more accurate name for the memory region and also
disambiguates between the map and new map regions of memory, making it
easier to understand.
2019-07-06 02:24:30 -04:00
ReinUsesLisp
79a23ca5f0 buffer_cache: Avoid [[nodiscard]] to make clang-format happy 2019-07-06 01:17:05 -03:00
ReinUsesLisp
83050c9495 buffer_cache: Try to fix MinGW build 2019-07-06 01:14:05 -03:00
ReinUsesLisp
f7691ebe57 gl_rasterizer: Fix nullptr dereference on disabled buffers 2019-07-06 00:37:56 -03:00
ReinUsesLisp
7ecf64257a gl_rasterizer: Minor style changes 2019-07-06 00:37:55 -03:00
ReinUsesLisp
9cdc576f60 gl_rasterizer: Fix vertex and index data invalidations 2019-07-06 00:37:55 -03:00
ReinUsesLisp
1fa21fa192 gl_buffer_cache: Implement with generic buffer cache 2019-07-06 00:37:55 -03:00
ReinUsesLisp
32c0212b24 buffer_cache: Implement a generic buffer cache
Implements a templated class with a similar approach to our current
generic texture cache. It is designed to be compatible with Vulkan and
OpenGL.
2019-07-06 00:37:55 -03:00
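
A heavily simplified sketch of such a backend-agnostic cache (the class, method and template parameter names here are illustrative, not the actual interface): the template owns the lookup logic while each backend supplies the real buffer allocation and upload:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    // BufferType is whatever handle the backend uses (e.g. a GL handle or a Vulkan buffer wrapper).
    template <typename BufferType>
    class BufferCacheBase {
    public:
        virtual ~BufferCacheBase() = default;

        // Return a cached buffer for this guest range, creating and uploading it on a miss.
        BufferType UploadMemory(std::uint64_t gpu_addr, std::size_t size) {
            const auto [it, inserted] = cache.try_emplace(gpu_addr);
            if (inserted) {
                it->second = CreateAndUpload(gpu_addr, size);
            }
            return it->second;
        }

    protected:
        // Each backend (OpenGL, Vulkan) implements the actual allocation and upload.
        virtual BufferType CreateAndUpload(std::uint64_t gpu_addr, std::size_t size) = 0;

    private:
        std::unordered_map<std::uint64_t, BufferType> cache;
    };
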
ReinUsesLisp
2bcae41a73 gl_buffer_cache: Remove global system getters 2019-07-06 00:37:55 -03:00
ReinUsesLisp
02ab844934 gl_device: Query SSBO alignment 2019-07-06 00:37:55 -03:00
ReinUsesLisp
d14fbfb9b5 gl_buffer_cache: Implement flushing 2019-07-06 00:37:55 -03:00
ReinUsesLisp
345f852bdb gl_rasterizer: Drop gl_global_cache in favor of gl_buffer_cache 2019-07-06 00:37:55 -03:00
ReinUsesLisp
8155b12d3d gl_buffer_cache: Rework to support internalized buffers 2019-07-06 00:37:55 -03:00
ReinUsesLisp
f8ba72d491 gl_buffer_cache: Store in CachedBufferEntry the used buffer handle 2019-07-06 00:37:55 -03:00
ReinUsesLisp
b54fb8fc4c gl_buffer_cache: Return used buffer from Upload function 2019-07-06 00:37:55 -03:00
ReinUsesLisp
a6d2f52fc3 gl_rasterizer: Add some commentaries 2019-07-06 00:37:55 -03:00
ReinUsesLisp
2b9d4088ec gl_rasterizer: Make DrawParameters rasterizer instance const 2019-07-06 00:37:55 -03:00
ReinUsesLisp
2e39c20da5 gl_rasterizer: Move index buffer uploading to its own method 2019-07-06 00:37:55 -03:00
Lioncash
313cc36fec kernel/vm_manager: Handle stack/TLS IO region placement better
Handles the placement of the stack a little nicer compared to the
previous code, which was off in a few ways. e.g.

The stack (new map) region shouldn't be the width of the entire address
space if the size of the region calculation ends up being zero. It
should be placed at the same location as the TLS IO region and also have
the same size.

In the event the TLS IO region contains a size of zero, we should also
be doing the same thing. This fixes our memory layout a little bit and
also resolves some cases where assertions can trigger due to the memory
layout being incorrect.
2019-07-05 21:57:31 -04:00
Lioncash
48807e9a24 core/reporter: Allow moves into SaveToFile()
Taking the json instance as a constant reference makes all moves into
the parameter non-functional, resulting in copies. Taking it by value
allows moves to function.
2019-07-05 17:45:34 -04:00
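
The general C++ point being made, as a self-contained sketch unrelated to the reporter's actual types:

    #include <string>
    #include <utility>

    struct Sink {
        std::string stored;
    };

    // Pass by const reference: a caller's std::move has no effect and a copy is made anyway.
    void SaveCopy(Sink& sink, const std::string& data) {
        sink.stored = data;
    }

    // Pass by value: the caller can move into the parameter, and we move again into storage.
    void SaveMove(Sink& sink, std::string data) {
        sink.stored = std::move(data);
    }
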
Lioncash
2321656dbe core/reporter: Add missing includes and forward declarations
Adds missing inclusions to prevent potential compilation issues.
2019-07-05 17:45:24 -04:00
Lioncash
e721c344ae core/reporter: Remove unnecessary namespace qualifiers
The Reporter class is part of the Core namespace, so the System class
doesn't need to be qualified.
2019-07-05 17:09:26 -04:00
Lioncash
6ec48af222 core/reporter: Remove pessimizing move in GetHLERequestContextData()
This can inhibit copy-elision, so we can remove this redundant move.
2019-07-05 17:08:13 -04:00
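
An illustration of the pattern being removed, in generic code rather than the reporter itself:

    #include <string>
    #include <utility>

    std::string BuildPessimized() {
        std::string data = "payload";
        return std::move(data); // inhibits copy elision (NRVO); many compilers warn about this
    }

    std::string BuildElided() {
        std::string data = "payload";
        return data;            // eligible for copy elision, or a cheap move at worst
    }
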
Lioncash
f12eb40834 core/reporter: Make bracing consistent
Makes all control statements braced, regardless of their size, making
code more uniform.
2019-07-05 17:05:27 -04:00
Lioncash
7ad11e3867 core/reporter: Return in error case in SaveToFile()
If the path couldn't be created, then we shouldn't be attempting to save
the file.
2019-07-05 17:02:32 -04:00
Zach Hilman
772c86a260 Merge pull request #2601 from FernandoS27/texture_cache
Implement a new Texture Cache
2019-07-05 13:39:13 -04:00
Fernando Sahmkow
3b9d89839d texture_cache: Address Feedback 2019-07-05 09:46:53 -04:00
Fernando Sahmkow
30b176f92b texture_cache: Correct Texture Buffer Uploading 2019-07-04 19:38:19 -04:00
Zach Hilman
3f3a93f13b Merge pull request #2669 from FearlessTobi/move-cpujit-setting
yuzu: Move CPU Jit setting to Debug tab
2019-07-04 15:33:59 -04:00
Zach Hilman
54a02d14fd Merge pull request #2555 from lioncash/tls
kernel/process: Decouple TLS handling from threads
2019-07-04 15:32:32 -04:00
fearlessTobi
447bdac298 yuzu: Remove CPU Jit setting from the UI
A normal user shouldn't change this, as it will slow down the emulation and can lead to bugs or crashes. The renaming is done in order to prevent users from leaving this on without a way to turn it off from the UI.
2019-07-04 14:48:08 +02:00
bunnei
cca663792f Merge pull request #2670 from DarkLordZach/fix-merge-discrep-1
gl_shader_cache: Make CachedShader constructor private
2019-07-04 03:03:44 -04:00
bunnei
3c7eed16dc Merge pull request #2658 from ogniK5377/QueryAudioDeviceOutputEvent
IAudioDevice::QueryAudioDeviceOutputEvent
2019-07-04 01:42:22 -04:00
bunnei
70b595a63b Merge pull request #2638 from DarkLordZach/quest-flag
set: Implement GetQuestFlag with config option
2019-07-04 01:40:41 -04:00
Zach Hilman
ad50cd7df9 gl_shader_cache: Make CachedShader constructor private
Fixes missing review comments introduced.
2019-07-03 20:39:46 -04:00
Lioncash
e23110bd9f kernel/process: Default initialize all member variables
Ensures a Process instance is always created with a deterministic
initial state.
2019-07-03 20:31:40 -04:00
Lioncash
abdce723eb kernel/process: Decouple TLS handling from threads
Extracts out all of the thread local storage management from thread
instances themselves and makes the owning process handle the management
of the memory. This brings the memory management slightly more in line
with how the kernel handles these allocations.

Furthermore, this also makes the TLS page management a little more
readable compared to the lingering implementation that was carried over
from Citra.
2019-07-03 20:31:40 -04:00
Lioncash
55481df50f kernel/vm_manager: Add overload of FindFreeRegion() that operates on a boundary
This will be necessary for making our TLS slot management slightly more
straightforward. This can also be utilized for other purposes in the
future.

We can implement the existing simpler overload in terms of this one
anyway; we just pass the beginning and end of the ASLR region as the
boundaries.
2019-07-03 20:29:49 -04:00
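
A sketch of the relationship described above, assuming yuzu-style aliases (VAddr, u64) and the ASLR region getters; the actual declarations and return types may differ:

    // Sketch: the simpler overload forwards to the bounded one, using the ASLR region as limits.
    VAddr VMManager::FindFreeRegion(u64 size) const {
        return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size);
    }
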
Zach Hilman
beb3d77a79 Merge pull request #2613 from ogniK5377/InitalizeApplicationInfo
Implemented InitializeApplicationInfo & InitializeApplicationInfoRestricted
2019-07-03 20:23:56 -04:00
Zach Hilman
e86af37ecb Merge pull request #2608 from ogniK5377/Time_GetSharedMemoryNativeHandle
Implement Time::GetSharedMemoryNativeHandle
2019-07-03 20:22:23 -04:00
Zach Hilman
da5a537029 Merge pull request #2563 from ReinUsesLisp/shader-initializers
gl_shader_cache: Use static constructors for CachedShader initialization
2019-07-03 20:20:05 -04:00
bunnei
58032e0085 Merge pull request #2604 from ogniK5377/INotificationService
Implemented INotificationService
2019-07-02 23:32:57 -04:00
bunnei
2521007c09 Merge pull request #2659 from FernandoS27/safe-caches
rasterizer_cache: Protect inherited caches from submission level
2019-07-02 22:27:04 -04:00
Zach Hilman
6be79bab37 Merge pull request #2660 from bakugo/deltafragments
file_sys: Ignore DeltaFragment NCAs during installation
2019-07-01 22:27:24 -05:00
Bakugo
b50557d1f0 file_sys: Rename other ContentRecordType members 2019-07-02 00:57:23 +01:00
David Marcec
b82b5e46e7 audren: Only manage wave buffers with a size
We shouldn't be incrementing if wave buffers are empty. They are considered invalid/unused wave buffers.

This fixes the issue of certain sounds looping when they shouldn't
2019-07-01 21:20:23 +10:00
Fernando Sahmkow
4705d1b523 rasterizer_cache: Protect inherited caches from submission level 2019-07-01 04:32:01 -04:00
David Marcec
965608e6d1 IAudioDevice::QueryAudioDeviceOutputEvent
The event should only be signaled when the output audio device gets changed, for example from speaker to USB headset. We don't identify different devices internally yet, so there's no need to signal the event yet.
2019-07-01 18:05:44 +10:00
Bakugo
c72ef5f405 file_sys/registered_cache: Improve missing metadata error
This can happen when installing NSPs too, not just XCIs.
2019-07-01 07:31:32 +01:00
Bakugo
9968c0883a file_sys/submission_package: Don't warn about missing DeltaFragment NCAs
DeltaFragments are not useful to us and are often not included in patch NSPs.
2019-07-01 06:46:05 +01:00
Bakugo
79163fca80 file_sys/registered_cache: Ignore DeltaFragment NCAs during installation
DeltaFragments are only used to download and apply partial patches on a real console, and are not useful to us at all. Most patch NSPs do not include them, and when they do, it's a waste of space to install them.
2019-07-01 06:37:22 +01:00
Bakugo
306a24aad7 file_sys: Rename ContentRecordType::Patch to DeltaFragment
Avoids potential confusion, since patches and DeltaFragments are not the same thing. Actual full patches are listed under the Program type.
2019-07-01 06:32:13 +01:00
David Marcec
472210bf72 hid:StartLrAssignmentMode, hid:StopLrAssignmentMode, hid:SwapNpadAssignment
StartLrAssignmentMode and StopLrAssignmentMode don't require any implementation, as they are only used to show the screen for changing the controller orientation if the user wishes to do so. Ever since #1634 this has not been needed, as users can specify the controller orientation from the config and swap at any time. We store a private member just in case this gets used for anything extra in the future.
2019-07-01 15:12:57 +10:00
bunnei
d992909636 Merge pull request #2583 from FernandoS27/core-timing-safe
Core_Timing: Make core_timing threadsafe by default.
2019-06-30 12:54:00 -04:00
ReinUsesLisp
6e1db6b703 texture_cache: Pack sibling queries inside a method 2019-06-29 20:47:46 -03:00
ReinUsesLisp
8eae66907e texture_cache: Use std::vector reservation for sampled_textures 2019-06-29 20:10:31 -03:00
ReinUsesLisp
f6f1a8f26a texture_cache: Style changes 2019-06-29 19:52:37 -03:00
Zach Hilman
1ca2b504bf Merge pull request #2653 from FearlessTobi/revert-2474-patch-1
Revert "CMake: Get Git submodule dependencies via CMake"
2019-06-29 16:55:47 -05:00
ReinUsesLisp
dd9ace502b texture_cache: Use std::array for siblings_table 2019-06-29 18:54:13 -03:00
ReinUsesLisp
3f3c3ca5f9 texture_cache: Address feedback 2019-06-29 17:29:39 -03:00
Zach Hilman
7e5d7773cc am: Implement SetCpuBoostMode in terms of APM 2019-06-28 22:46:51 -04:00
Zach Hilman
e2ad3e1fb0 core: Keep instance of APM Controller 2019-06-28 22:46:31 -04:00
Zach Hilman
e52306ca60 apm: Implement SetCpuBoostMode 2019-06-28 22:46:00 -04:00
Zach Hilman
1c6e6305ea apm: Add getters for performance config and mode 2019-06-28 22:45:31 -04:00
Zach Hilman
9175b00e7d apm: Add apm:am service
8.0.0+ identical version of apm
2019-06-28 22:44:30 -04:00
Zach Hilman
65eb9cbb28 apm: Add Controller class to manage speed data and application 2019-06-28 22:43:51 -04:00
Zach Hilman
d40f38967e fsp-srv: Implement GetAccessLogVersionInfo
Returns some misc. data about logging to help the game determine if it should log.
2019-06-28 21:05:42 -04:00
Zach Hilman
554e2f2f98 reporter: Add report class for filesystem access logs 2019-06-28 21:02:50 -04:00
Zach Hilman
db2fdd0352 fsp-srv: Implement OutputAccessLogToSdCard
Allows games to log data to the SD.
2019-06-28 21:02:34 -04:00
Zach Hilman
f477c5dfdd set: Implement GetQuestFlag
Simply returns a true/false value indicating if the system is a kiosk system. This has been mapped to a config option for the purposes of yuzu.
2019-06-28 18:38:47 -04:00
Zach Hilman
efa7d8d04b settings: Add config option for kiosk (quest) mode 2019-06-28 18:37:33 -04:00
David Marcec
dfe4b3f723 Attempt clang format fix?
Seems to be an issue with clang format
2019-06-28 22:08:50 +10:00
David Marcec
7d417d501d Added errors.h to cmakelist 2019-06-28 15:31:29 +10:00
David Marcec
c2146c4eef Addressed issues 2019-06-28 15:29:38 +10:00
David Marcec
fd6549be73 Addressed issues 2019-06-28 15:19:51 +10:00
David Marcec
0b03e8a98f Implemented InitializeApplicationInfo & InitializeApplicationInfoRestricted
InitializeApplicationInfoRestricted will need further implementation, as it checks other user requirements about the game. As we're emulating, we assume the user owns the game, so we currently skip these checks; a fuller implementation will need to be added later on.
2019-06-27 16:44:42 +10:00
Zach Hilman
bce4bfffb6 pm: Implement pm:shell and pm:dmnt GetApplicationPid
Returns the process ID of the current application or 0 if no app is running.
2019-06-26 19:07:34 -04:00
Zach Hilman
354c254cde pm: Implement pm:dmnt GetTitlePid
Takes a title ID and searches for a matching process, returning an error if it doesn't exist, otherwise the process ID.
2019-06-26 19:06:51 -04:00
Zach Hilman
49af3bcdcb pm: Implement pm:info GetTitleId
Searches the process list for a process with the specified ID, returning the title ID if it exists.
2019-06-26 19:05:04 -04:00
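
A self-contained sketch of the process-list search these pm commands describe (the Process struct is a simplified stand-in for yuzu's kernel process objects):

    #include <algorithm>
    #include <cstdint>
    #include <optional>
    #include <vector>

    struct Process {
        std::uint64_t process_id;
        std::uint64_t title_id;
    };

    // Return the title ID of the process with the given process ID, if such a process exists.
    std::optional<std::uint64_t> GetTitleId(const std::vector<Process>& process_list,
                                            std::uint64_t process_id) {
        const auto it = std::find_if(process_list.begin(), process_list.end(),
                                     [process_id](const Process& p) { return p.process_id == process_id; });
        if (it == process_list.end()) {
            return std::nullopt;
        }
        return it->title_id;
    }
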
David Marcec
f67039c067 Addressed issues 2019-06-26 16:52:34 +10:00
Fernando Sahmkow
223ca80753 texture_cache: Correct variable naming. 2019-06-25 19:35:08 -04:00
Fernando Sahmkow
5aeabd9a17 gl_texture_cache: Correct asserts 2019-06-25 19:26:59 -04:00
Fernando Sahmkow
88bc39374f texture_cache: Corrections, documentation and asserts 2019-06-25 18:36:19 -04:00
Fernando Sahmkow
c0abc7124d surface_params: Corrections, asserts and documentation. 2019-06-25 18:03:25 -04:00
Fernando Sahmkow
fb234560b0 copy_params: use constexpr for constructor 2019-06-25 17:42:50 -04:00
Fernando Sahmkow
18d24fbdd0 gl_texture_cache: Corrections and fixes 2019-06-25 17:40:08 -04:00
Fernando Sahmkow
36665ce0b2 gl_resource_manager: Correct MakeStreamCopy 2019-06-25 17:32:04 -04:00
Fernando Sahmkow
58c8a44e7a texture_cache: Query MemoryManager from the system 2019-06-25 17:26:00 -04:00
David Marcec
19dc36ce06 Implement Time::GetSharedMemoryNativeHandle
This PR attempts to implement the shared memory provided by GetSharedMemoryNativeHandle. There is still more work to be done; however, that requires an overhaul of the current time module to handle clock contexts. This PR is mainly to get the basic functionality of the shared memory working and to allow additions to it while things get improved.

Things to note:

Memory barriers are used in the shared memory, and a better solution would be needed to implement them properly. Currently in this PR the memory barriers are faked, as everything is synchronous and single threaded. They work by incrementing the counter and populating the two data slots; on reading, the last added data is returned.

Specific values in the shared memory would need to be updated periodically. This isn't included in this PR since we don't actively do this yet. In a later PR, when time is refactored, this should be done.

Finally, we don't handle clock contexts yet. When time is refactored, we will need to update the shared memory for specific contexts. This PR already does this; however, since the contexts are all identical and not separated, we're just updating the same values for each context, which in this case are empty.

Time:SetStandardUserSystemClockAutomaticCorrectionEnabled and Time:IsStandardUserSystemClockAutomaticCorrectionEnabled are also partially implemented in this PR. The implementation is partial because of, once again, the lack of clock contexts. This will be improved on in a future PR.

This PR closes issue #2556
2019-06-26 00:45:53 +10:00
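
A rough, illustrative sketch (not the actual time shared-memory layout) of the counter-plus-two-slots pattern described above: the writer fills the slot readers are not currently selecting and then bumps the counter, and readers always take the slot selected by the latest counter value:

    #include <array>
    #include <cstdint>

    template <typename T>
    struct DoubleBufferedValue {
        std::uint32_t counter = 0;
        std::array<T, 2> slots{};

        // Writer: populate the inactive slot, then publish it by incrementing the counter.
        void Store(const T& value) {
            slots[(counter + 1) % 2] = value;
            ++counter; // on real hardware a memory barrier would be required here
        }

        // Reader: always read the most recently published slot.
        T Load() const {
            return slots[counter % 2];
        }
    };
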
David Marcec
192f1f7ebe SizedNotificationInfo should be 0x10 bytes; user_uuid is incorrect, it should be the user's account ID 2019-06-25 15:19:37 +10:00
David Marcec
5d005b87a3 fixed spelling errors and fixed issue with Pop not returning the SizedNotificationInfo 2019-06-25 11:23:23 +10:00
ReinUsesLisp
7565389700 texture_cache: Include "core/core.h" 2019-06-24 02:15:57 -03:00
ReinUsesLisp
e723441e37 gl_texture_cache: Explicitly add indirect include 2019-06-24 02:13:55 -03:00
ReinUsesLisp
34841a41c3 texture_cache/surface_view: Address feedback 2019-06-24 02:09:56 -03:00
ReinUsesLisp
0837290992 texture_cache/surface_base: Address feedback 2019-06-24 02:08:52 -03:00
ReinUsesLisp
75de730e28 video_core/surface: Address feedback 2019-06-24 02:07:11 -03:00
ReinUsesLisp
10a83653ee decode/texture: Address feedback 2019-06-24 02:05:05 -03:00
ReinUsesLisp
4504302abc renderer_opengl/utils: Remove unused includes and unused forward declaration 2019-06-24 02:03:37 -03:00
ReinUsesLisp
4b2ff1e00e gl_texture_cache: Address some feedback 2019-06-24 02:01:44 -03:00
ReinUsesLisp
0b6df52109 gl_shader_disk_cache: Address feedback 2019-06-24 01:59:32 -03:00
ReinUsesLisp
b8b05a484a gl_shader_decompiler: Address feedback 2019-06-24 01:56:38 -03:00
ReinUsesLisp
4d63f97945 shader_bytecode: Include missing <array> 2019-06-24 01:51:02 -03:00
ReinUsesLisp
de982deb25 common/alignment: Address feedback 2019-06-24 01:47:09 -03:00
David Marcec
e49ae3bf92 Implemented INotificationService 2019-06-24 12:26:45 +10:00
Fernando Sahmkow
d1812316e1 texture_cache: Style and Corrections 2019-06-20 21:24:47 -04:00
Fernando Sahmkow
51ba60b27e shader_cache: Correct versioning and size calculation. 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
97c8c9f49a texture_cache: Eliminate linear textures fallthrough 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
6acdae0e4c texture_cache: Correct format R16U as sibling 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
d7587842eb texture_cache: Implement texception detection and texture barriers. 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
198a0395bb texture_cache: Corrections to buffers and shadow formats use. 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
fed773a86c texture_cache: Implement Irregular Views in surfaces 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
082740d34d surface: Correct format S8Z24 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
03d489dcf5 texture_cache: Initialize all siblings to invalid pixel format. 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
9422cf7c10 gl_texture_cache: Use Stream Buffers instead of Persistent for Buffer Copies. 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
fac3706253 gl_texture_cache: Correct Image Blit 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
7232a1ed16 decoders: correct block calculation 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
3dd7643214 texture_cache: Use siblings textures on Rebuild and fix possible error on blitting 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
4db28f72f6 texture_cache: Remove old rasterizer cache 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
2d83553ea7 texture_cache: Implement siblings texture formats. 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
cb728797b0 fermi2d: Correct Origin Mode 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
a56f687793 texture_cache: correct texture buffer on surface params 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
b01f9c8a70 texture_cache: eliminate accelerated depth->color/color->depth copies due to driver instability. 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
561ce29c98 texture_cache: correct mutex locks 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
b7de31ac97 shader_ir: Fix image copy rebase issues 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
6f69f06873 texture_cache: Don't Image Copy if component types differ 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
9f755218a1 texture_cache: move some large methods to cpp files 2019-06-20 21:38:34 -03:00
Fernando Sahmkow
3809041c24 texture_cache: Optimize GetSurface and use references on functions that don't change a surface. 2019-06-20 21:38:33 -03:00
Fernando Sahmkow
60bf761afb texture_cache: Implement Buffer Copy and detect Turing GPUs Image Copies 2019-06-20 21:38:33 -03:00
Fernando Sahmkow
228f516bb4 texture_cache uncompress-compress is untopological.
This makes conflicts between uncompressed and compressed textures be
auto-recycled. It also limits the number of mipmaps a texture can have
if it goes above its limit.
2019-06-20 21:38:33 -03:00
Fernando Sahmkow
9251354152 texture_cache: Correct copying between compressed and uncompressed formats 2019-06-20 21:38:33 -03:00
Fernando Sahmkow
0966665fc2 texture_cache: Only load on recycle with accurate GPU.
Testing so far has proven this to be quite safe, as texture memory reads
added a 2-5 ms load to the current cache.
2019-06-20 21:38:33 -03:00
Fernando Sahmkow
ea1525dab1 Fix rebase errors 2019-06-20 21:38:33 -03:00
Fernando Sahmkow
bdf9faab33 texture_cache: Handle non-contiguous surfaces. 2019-06-20 21:38:33 -03:00
Fernando Sahmkow
e60ed2bb3e texture_cache: return null surface on invalid address 2019-06-20 21:38:33 -03:00
Fernando Sahmkow
fcac55d5bf texture_cache: Add checks for texture buffers. 2019-06-20 21:38:33 -03:00
Fernando Sahmkow
175aa343ff texture_cache: Fermi2D reform and implement View Mirage
This also does some fixes on compressed texture reinterpretation and on the
Fermi2D engine in general.
2019-06-20 21:38:33 -03:00
ReinUsesLisp
1bf4154e7d gl_shader_decompiler: Implement image binding settings 2019-06-20 21:38:33 -03:00
ReinUsesLisp
9097301d92 shader: Implement bindless images 2019-06-20 21:38:33 -03:00
ReinUsesLisp
06c4ce8645 shader: Decode SUST and implement backing image functionality 2019-06-20 21:38:33 -03:00
ReinUsesLisp
007ffbef1c gl_rasterizer: Track texture buffer usage 2019-06-20 21:38:33 -03:00
ReinUsesLisp
58c0d37422 video_core: Make ARB_buffer_storage a required extension 2019-06-20 21:36:12 -03:00
ReinUsesLisp
07f7ce1da2 gl_rasterizer_cache: Use texture buffers to emulate texture buffers 2019-06-20 21:36:12 -03:00
ReinUsesLisp
b8c75a845b maxwell_3d: Partially implement texture buffers as 1D textures 2019-06-20 21:36:12 -03:00
ReinUsesLisp
6c81c8f5b7 gl_shader_decompiler: Allow 1D textures to be texture buffers 2019-06-20 21:36:12 -03:00
ReinUsesLisp
4e81fc8296 shader: Implement texture buffers 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
d267948a73 texture_cache: loosen TryReconstructSurface when accurate GPU is not on.
Also corrects some asserts.
2019-06-20 21:36:12 -03:00
Fernando Sahmkow
6162cb922e texture_cache: Document the most important methods. 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
4530511ee4 texture_cache: Try to Reconstruct Surface on bigger than overlap.
This fixes clouds in SMO Cap Kingdom and lens on Cloud Kingdom.
Also moved accurate_gpu setting check to Pick Strategy
2019-06-20 21:36:12 -03:00
Fernando Sahmkow
a79831d9d0 texture_cache: Implement Guard mechanism 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
7731a0e2d1 texture_cache: General Fixes
Fixed ASTC mipmaps loading
Fixed alignment on openGL upload/download
Fixed Block Height Calculation
Removed unalign_height
2019-06-20 21:36:12 -03:00
ReinUsesLisp
c2ed348bdd surface_params: Ensure pitch is always written to avoid surface leaks 2019-06-20 21:36:12 -03:00
ReinUsesLisp
9098905dd1 gl_framebuffer_cache: Use a hashed struct to cache framebuffers 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
d65a4af895 texture_cache return invalid buffer on deactivated color_mask 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
6bd034eae9 engine_upload: Adapt to new Texture Cache 2019-06-20 21:36:12 -03:00
ReinUsesLisp
2131f71573 surface_params: Optimize CreateForTexture
Instead of using Common::AlignUp, use Common::AlignBits to align the
texture compression factor.
2019-06-20 21:36:12 -03:00
Fernando Sahmkow
41b4674458 gl_texture_cache: Make main views be proxy textures instead of a full view. 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
07cc7e0c12 texture_cache: Add ASync Protections 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
1bbc9debfb Remove Framebuffer reconfiguration and restrict rendertarget protection 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
5192521dc3 texture_cache: Implement GPU Dirty Flags 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
94f2be5473 texture_cache: Optimize GetMipBlockHeight and GetMipBlockDepth 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
a4a58be2d4 texture_cache: Implement L1_Inner_cache 2019-06-20 21:36:12 -03:00
ReinUsesLisp
345e73f2fe video_core: Use un-shifted block sizes to avoid integer divisions
Instead of storing all block widths, heights and depths in their shifted
form:

block_width = 1U << block_shift;

Store them as they are provided by the emulated hardware (their
block_shift form). This way we can avoid doing the costly
Common::AlignUp operation to align texture sizes and drop CPU integer
divisions with bitwise logic (defined in Common::AlignBits).
2019-06-20 21:36:12 -03:00
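
The arithmetic being traded, as a minimal sketch; these helpers mirror the idea of Common::AlignUp and Common::AlignBits but are simplified re-implementations, not the yuzu headers:

    #include <cstdint>

    // Align to an arbitrary size: needs an integer division.
    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t size) {
        return ((value + size - 1) / size) * size;
    }

    // Align when only the shift (log2 of the block size) is known: pure bitwise logic.
    constexpr std::uint64_t AlignBits(std::uint64_t value, std::uint32_t shift) {
        return ((value + (std::uint64_t{1} << shift) - 1) >> shift) << shift;
    }

    static_assert(AlignUp(100, 32) == AlignBits(100, 5), "both round 100 up to 128");
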
ReinUsesLisp
28d7c2f5a5 texture_cache: Change internal cache from lists to vectors 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
b347543e83 Reduce amount of size calculations. 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
4e2071b6d9 texture_cache: Correct premature texceptions
Due to our current infrastructure, it is possible for a mipmap to be set
as a render target before a texception of that mipmap's superset is
set afterwards. This is problematic, as we rely on texture views to set
up texceptions and to protect render targets for 3D texture
rendering.

One simple solution is to configure framebuffers after texture setup, but
this brings other problems. This solution forces a reconfiguration of
the framebuffers after such an event happens.
2019-06-20 21:36:12 -03:00
Fernando Sahmkow
ba677ccb5a texture_cache: Implement guest flushing 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
de0b1cb2b2 Fixes to mipmap's process and reconstruct process 2019-06-20 21:36:12 -03:00
ReinUsesLisp
e0002599ac surface_base: Add parenthesis to EmplaceOverview's predicate 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
324e470879 Texture Cache: Implement Blitting and Fermi Copies 2019-06-20 21:36:12 -03:00
ReinUsesLisp
549fd18ac4 surface_view: Add constructor for ViewParams 2019-06-20 21:36:12 -03:00
ReinUsesLisp
16e8625a30 surface_base: Split BreakDown into layered and non-layered variants 2019-06-20 21:36:12 -03:00
ReinUsesLisp
2b30000a1e surface_base: Silence truncation warnings and minor renames and reordering 2019-06-20 21:36:12 -03:00
ReinUsesLisp
03d10ea3b4 copy_params: Use constructor instead of C-like initialization 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
1af4414861 Correct Mipmaps View method in Texture Cache 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
d86f9cd709 Change texture_cache caching from GPUAddr to CacheAddr
This also reverts the changes that made invalidation and flushing go through
the GPU address.
2019-06-20 21:36:12 -03:00
Fernando Sahmkow
b711cdce78 Corrections to Structural Matching
The texture will now be reconstructed if the width only matches on GoB 
alignment.
2019-06-20 21:36:12 -03:00
Fernando Sahmkow
bc930754cc Implement Texture Cache V2 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
3d471e732d Correct Surface Base and Views for new Texture Cache 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
3b26206dbd Add OGLTextureView 2019-06-20 21:36:12 -03:00
Fernando Sahmkow
6b0695b3cd Deglobalize Memory Manager on texture cache and Implement Invalidation and Flushing using GPUVAddr 2019-06-20 21:36:11 -03:00
ReinUsesLisp
6c410104f4 texture_cache: Remove execution context copies from the texture cache
This is done to simplify the OpenGL implementation, it is needed for
Vulkan.
2019-06-20 21:36:11 -03:00
ReinUsesLisp
fa59a7b4d8 gl_texture_cache: Implement fermi copies 2019-06-20 21:36:11 -03:00
ReinUsesLisp
1b4503c571 texture_cache: Split texture cache into different files 2019-06-20 21:36:11 -03:00
ReinUsesLisp
5f3aacdc37 texture_cache: Move staging buffer into a generic implementation 2019-06-20 21:36:11 -03:00
ReinUsesLisp
2787a0c287 texture_cache: Flush 3D textures in the order they are drawn 2019-06-20 21:36:11 -03:00
ReinUsesLisp
4b396f375c gl_texture_cache: Minor changes 2019-06-20 21:36:11 -03:00
ReinUsesLisp
0cefb7bcb4 gl_texture_cache: Add copy from multiple overlaps into a single surface 2019-06-20 21:36:11 -03:00
ReinUsesLisp
84139586c9 gl_texture_cache: Attach surface textures instead of views 2019-06-20 21:36:11 -03:00
ReinUsesLisp
fb94871791 gl_texture_cache: Add fast copy path 2019-06-20 21:36:11 -03:00
ReinUsesLisp
bab21e8cb3 gl_texture_cache: Initial implementation 2019-06-20 21:36:11 -03:00
Fernando Sahmkow
90792cdb6e Core_Timing: Make core_timing threadsafe by default.
The old implementation had faulty thread-safe methods where events could
go missing. This implementation unifies the unsafe/safe methods and makes
core timing thread-safe overall.
2019-06-16 14:14:35 -04:00
ReinUsesLisp
4ec8a3df08 gl_shader_cache: Use static constructors for CachedShader initialization 2019-06-07 20:20:22 -03:00
225 changed files with 8778 additions and 4664 deletions

View File

@@ -0,0 +1,15 @@
#!/bin/bash -ex
# Copy documentation
cp license.txt "$REV_NAME"
cp README.md "$REV_NAME"
tar $COMPRESSION_FLAGS "$ARCHIVE_NAME" "$REV_NAME"
mv "$REV_NAME" $RELEASE_NAME
7z a "$REV_NAME.7z" $RELEASE_NAME
# move the compiled archive into the artifacts directory to be uploaded by travis releases
mv "$ARCHIVE_NAME" artifacts/
mv "$REV_NAME.7z" artifacts/

View File

@@ -0,0 +1,6 @@
#!/bin/bash -ex
GITDATE="`git show -s --date=short --format='%ad' | sed 's/-//g'`"
GITREV="`git show -s --format='%h'`"
mkdir -p artifacts

View File

@@ -0,0 +1,6 @@
#!/bin/bash -ex
# Run clang-format
cd /yuzu
chmod a+x ./.ci/scripts/format/script.sh
./.ci/scripts/format/script.sh

View File

@@ -0,0 +1,4 @@
#!/bin/bash -ex
chmod a+x ./.ci/scripts/format/docker.sh
docker run -v $(pwd):/yuzu yuzuemu/build-environments:linux-clang-format /bin/bash -ex /yuzu/.ci/scripts/format/docker.sh

View File

@@ -0,0 +1,37 @@
#!/bin/bash -ex
if grep -nrI '\s$' src *.yml *.txt *.md Doxyfile .gitignore .gitmodules .ci* dist/*.desktop \
        dist/*.svg dist/*.xml; then
    echo Trailing whitespace found, aborting
    exit 1
fi
# Default clang-format points to default 3.5 version one
CLANG_FORMAT=clang-format-6.0
$CLANG_FORMAT --version
if [ "$TRAVIS_EVENT_TYPE" = "pull_request" ]; then
    # Get list of every file modified in this pull request
    files_to_lint="$(git diff --name-only --diff-filter=ACMRTUXB $TRAVIS_COMMIT_RANGE | grep '^src/[^.]*[.]\(cpp\|h\)$' || true)"
else
    # Check everything for branch pushes
    files_to_lint="$(find src/ -name '*.cpp' -or -name '*.h')"
fi
# Turn off tracing for this because it's too verbose
set +x
for f in $files_to_lint; do
    d=$(diff -u "$f" <($CLANG_FORMAT "$f") || true)
    if ! [ -z "$d" ]; then
        echo "!!! $f not compliant to coding style, here is the fix:"
        echo "$d"
        fail=1
    fi
done
set -x
if [ "$fail" = 1 ]; then
    exit 1
fi

View File

@@ -0,0 +1,14 @@
#!/bin/bash -ex
cd /yuzu
ccache -s
mkdir build || true && cd build
cmake .. -G Ninja -DYUZU_USE_BUNDLED_UNICORN=ON -DYUZU_USE_QT_WEB_ENGINE=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON
ninja
ccache -s
ctest -VV -C Release

View File

@@ -0,0 +1,5 @@
#!/bin/bash -ex
mkdir -p "ccache" || true
chmod a+x ./.ci/scripts/linux/docker.sh
docker run -e ENABLE_COMPATIBILITY_REPORTING -e CCACHE_DIR=/yuzu/ccache -v $(pwd):/yuzu yuzuemu/build-environments:linux-fresh /bin/bash /yuzu/.ci/scripts/linux/docker.sh

View File

@@ -0,0 +1,14 @@
#!/bin/bash -ex
. .ci/scripts/common/pre-upload.sh
REV_NAME="yuzu-linux-${GITDATE}-${GITREV}"
ARCHIVE_NAME="${REV_NAME}.tar.xz"
COMPRESSION_FLAGS="-cJvf"
mkdir "$REV_NAME"
cp build/bin/yuzu-cmd "$REV_NAME"
cp build/bin/yuzu "$REV_NAME"
. .ci/scripts/common/post-upload.sh

View File

@@ -0,0 +1,28 @@
# Download all pull requests as patches that match a specific label
# Usage: python download-patches-by-label.py <Label to Match> <Root Path Folder to DL to>
import requests, sys, json, urllib3.request, shutil, subprocess
http = urllib3.PoolManager()
dl_list = {}
def check_individual(labels):
    for label in labels:
        if (label["name"] == sys.argv[1]):
            return True
    return False
try:
    url = 'https://api.github.com/repos/yuzu-emu/yuzu/pulls'
    response = requests.get(url)
    if (response.ok):
        j = json.loads(response.content)
        for pr in j:
            if (check_individual(pr["labels"])):
                pn = pr["number"]
                print("Matched PR# %s" % pn)
                print(subprocess.check_output(["git", "fetch", "https://github.com/yuzu-emu/yuzu.git", "pull/%s/head:pr-%s" % (pn, pn), "-f"]))
                print(subprocess.check_output(["git", "merge", "--squash", "pr-%s" % pn]))
                print(subprocess.check_output(["git", "commit", "-m\"Merge PR %s\"" % pn]))
except:
    sys.exit(-1)

View File

@@ -0,0 +1,18 @@
# Checks to see if the specified pull request # has the specified tag
# Usage: python check-label-presence.py <Pull Request ID> <Name of Label>
import requests, json, sys
try:
    url = 'https://api.github.com/repos/yuzu-emu/yuzu/issues/%s' % sys.argv[1]
    response = requests.get(url)
    if (response.ok):
        j = json.loads(response.content)
        for label in j["labels"]:
            if label["name"] == sys.argv[2]:
                print('##vso[task.setvariable variable=enabletesting;]true')
                sys.exit()
except:
    sys.exit(-1)
print('##vso[task.setvariable variable=enabletesting;]false')

View File

@@ -0,0 +1,2 @@
git config --global user.email "yuzu@yuzu-emu.org"
git config --global user.name "yuzubot"

View File

@@ -0,0 +1,50 @@
#!/bin/bash -ex
cd /yuzu
ccache -s
# Dirty hack to trick unicorn makefile into believing we are in a MINGW system
mv /bin/uname /bin/uname1 && echo -e '#!/bin/sh\necho MINGW64' >> /bin/uname
chmod +x /bin/uname
# Dirty hack to trick unicorn makefile into believing we have cmd
echo '' >> /bin/cmd
chmod +x /bin/cmd
mkdir build || true && cd build
cmake .. -G Ninja -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWCross.cmake" -DUSE_CCACHE=ON -DYUZU_USE_BUNDLED_UNICORN=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DCMAKE_BUILD_TYPE=Release
ninja
# Clean up the dirty hacks
rm /bin/uname && mv /bin/uname1 /bin/uname
rm /bin/cmd
ccache -s
echo "Tests skipped"
#ctest -VV -C Release
echo 'Prepare binaries...'
cd ..
mkdir package
QT_PLATFORM_DLL_PATH='/usr/x86_64-w64-mingw32/lib/qt5/plugins/platforms/'
find build/ -name "yuzu*.exe" -exec cp {} 'package' \;
# copy Qt plugins
mkdir package/platforms
cp "${QT_PLATFORM_DLL_PATH}/qwindows.dll" package/platforms/
cp -rv "${QT_PLATFORM_DLL_PATH}/../mediaservice/" package/
cp -rv "${QT_PLATFORM_DLL_PATH}/../imageformats/" package/
rm -f package/mediaservice/*d.dll
for i in package/*.exe; do
    # we need to process pdb here, however, cv2pdb
    # does not work here, so we just simply strip all the debug symbols
    x86_64-w64-mingw32-strip "${i}"
done
pip3 install pefile
python3 .ci/scripts/windows/scan_dll.py package/*.exe "package/"
python3 .ci/scripts/windows/scan_dll.py package/imageformats/*.dll "package/"

View File

@@ -0,0 +1,5 @@
#!/bin/bash -ex
mkdir -p "ccache" || true
chmod a+x ./.ci/scripts/windows/docker.sh
docker run -e CCACHE_DIR=/yuzu/ccache -v $(pwd):/yuzu yuzuemu/build-environments:linux-mingw /bin/bash -ex /yuzu/.ci/scripts/windows/docker.sh

View File

@@ -0,0 +1,106 @@
import pefile
import sys
import re
import os
import queue
import shutil
# constant definitions
KNOWN_SYS_DLLS = ['WINMM.DLL', 'MSVCRT.DLL', 'VERSION.DLL', 'MPR.DLL',
'DWMAPI.DLL', 'UXTHEME.DLL', 'DNSAPI.DLL', 'IPHLPAPI.DLL']
# below is for Ubuntu 18.04 with specified PPA enabled, if you are using
# other distro or different repositories, change the following accordingly
DLL_PATH = [
'/usr/x86_64-w64-mingw32/bin/',
'/usr/x86_64-w64-mingw32/lib/',
'/usr/lib/gcc/x86_64-w64-mingw32/7.3-posix/'
]
missing = []
def parse_imports(file_name):
    results = []
    pe = pefile.PE(file_name, fast_load=True)
    pe.parse_data_directories()
    for entry in pe.DIRECTORY_ENTRY_IMPORT:
        current = entry.dll.decode()
        current_u = current.upper()  # b/c Windows is often case insensitive
        # here we filter out system dlls
        # dll w/ names like *32.dll are likely to be system dlls
        if current_u.upper() not in KNOWN_SYS_DLLS and not re.match(string=current_u, pattern=r'.*32\.DLL'):
            results.append(current)
    return results
def parse_imports_recursive(file_name, path_list=[]):
    q = queue.Queue()  # create a FIFO queue
    # file_name can be a string or a list for the convience
    if isinstance(file_name, str):
        q.put(file_name)
    elif isinstance(file_name, list):
        for i in file_name:
            q.put(i)
    full_list = []
    while q.qsize():
        current = q.get_nowait()
        print('> %s' % current)
        deps = parse_imports(current)
        # if this dll does not have any import, ignore it
        if not deps:
            continue
        for dep in deps:
            # the dependency already included in the list, skip
            if dep in full_list:
                continue
            # find the requested dll in the provided paths
            full_path = find_dll(dep)
            if not full_path:
                missing.append(dep)
                continue
            full_list.append(dep)
            q.put(full_path)
            path_list.append(full_path)
    return full_list
def find_dll(name):
    for path in DLL_PATH:
        for root, _, files in os.walk(path):
            for f in files:
                if name.lower() == f.lower():
                    return os.path.join(root, f)
def deploy(name, dst, dry_run=False):
    dlls_path = []
    parse_imports_recursive(name, dlls_path)
    for dll_entry in dlls_path:
        if not dry_run:
            shutil.copy(dll_entry, dst)
        else:
            print('[Dry-Run] Copy %s to %s' % (dll_entry, dst))
    print('Deploy completed.')
    return dlls_path
def main():
    if len(sys.argv) < 3:
        print('Usage: %s [files to examine ...] [target deploy directory]')
        return 1
    to_deploy = sys.argv[1:-1]
    tgt_dir = sys.argv[-1]
    if not os.path.isdir(tgt_dir):
        print('%s is not a directory.' % tgt_dir)
        return 1
    print('Scanning dependencies...')
    deploy(to_deploy, tgt_dir)
    if missing:
        print('Following DLLs are not found: %s' % ('\n'.join(missing)))
    return 0
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,13 @@
#!/bin/bash -ex
. .ci/scripts/common/pre-upload.sh
REV_NAME="yuzu-windows-mingw-${GITDATE}-${GITREV}"
ARCHIVE_NAME="${REV_NAME}.tar.gz"
COMPRESSION_FLAGS="-czvf"
mkdir "$REV_NAME"
# get around the permission issues
cp -r package/* "$REV_NAME"
. .ci/scripts/common/post-upload.sh

View File

@@ -0,0 +1,21 @@
parameters:
  artifactSource: 'true'

steps:
- task: DockerInstaller@0
  displayName: 'Prepare Environment'
  inputs:
    dockerVersion: '17.09.0-ce'
- task: CacheBeta@0
  displayName: 'Cache Build System'
  inputs:
    key: yuzu-v1-$(BuildName)-$(BuildSuffix)-$(CacheSuffix)
    path: $(System.DefaultWorkingDirectory)/ccache
    cacheHitVar: CACHE_RESTORED
- script: chmod a+x ./.ci/scripts/$(ScriptFolder)/exec.sh && ./.ci/scripts/$(ScriptFolder)/exec.sh
  displayName: 'Build'
- script: chmod a+x ./.ci/scripts/$(ScriptFolder)/upload.sh && ./.ci/scripts/$(ScriptFolder)/upload.sh
  displayName: 'Package Artifacts'
- publish: artifacts
  artifact: 'yuzu-$(BuildName)-$(BuildSuffix)'
  displayName: 'Upload Artifacts'

View File

@@ -0,0 +1,22 @@
jobs:
- job: build
displayName: 'standard'
pool:
vmImage: ubuntu-latest
strategy:
maxParallel: 10
matrix:
windows:
BuildSuffix: 'windows-mingw'
ScriptFolder: 'windows'
linux:
BuildSuffix: 'linux'
ScriptFolder: 'linux'
steps:
- template: ./sync-source.yml
parameters:
artifactSource: $(parameters.artifactSource)
needSubmodules: 'true'
- template: ./build-single.yml
parameters:
artifactSource: 'false'

View File

@@ -0,0 +1,30 @@
jobs:
- job: build_test
displayName: 'testing'
pool:
vmImage: ubuntu-latest
strategy:
maxParallel: 10
matrix:
windows:
BuildSuffix: 'windows-testing'
ScriptFolder: 'windows'
steps:
- task: PythonScript@0
condition: eq(variables['Build.Reason'], 'PullRequest')
displayName: 'Determine Testing Status'
inputs:
scriptSource: 'filePath'
scriptPath: '../scripts/merge/check-label-presence.py'
arguments: '$(System.PullRequest.PullRequestNumber) create-testing-build'
- ${{ if eq(variables.enabletesting, 'true') }}:
- template: ./sync-source.yml
parameters:
artifactSource: $(parameters.artifactSource)
needSubmodules: 'true'
- template: ./mergebot.yml
parameters:
matchLabel: 'testing-merge'
- template: ./build-single.yml
parameters:
artifactSource: 'false'

View File

@@ -0,0 +1,14 @@
parameters:
artifactSource: 'true'
steps:
- template: ./sync-source.yml
parameters:
artifactSource: $(parameters.artifactSource)
needSubmodules: 'false'
- task: DockerInstaller@0
displayName: 'Prepare Environment'
inputs:
dockerVersion: '17.09.0-ce'
- script: chmod a+x ./.ci/scripts/format/exec.sh && ./.ci/scripts/format/exec.sh
displayName: 'Verify Formatting'

46
.ci/templates/merge.yml Normal file
View File

@@ -0,0 +1,46 @@
jobs:
- job: merge
displayName: 'pull requests'
steps:
- checkout: self
submodules: recursive
- template: ./mergebot.yml
parameters:
matchLabel: '$(BuildName)-merge'
- task: ArchiveFiles@2
displayName: 'Package Source'
inputs:
rootFolderOrFile: '$(System.DefaultWorkingDirectory)'
includeRootFolder: false
archiveType: '7z'
archiveFile: '$(Build.ArtifactStagingDirectory)/yuzu-$(BuildName)-source.7z'
- task: PublishPipelineArtifact@1
displayName: 'Upload Artifacts'
inputs:
targetPath: '$(Build.ArtifactStagingDirectory)/yuzu-$(BuildName)-source.7z'
artifact: 'yuzu-$(BuildName)-source'
replaceExistingArchive: true
- job: upload_source
displayName: 'upload'
dependsOn: merge
steps:
- template: ./sync-source.yml
parameters:
artifactSource: 'true'
needSubmodules: 'true'
- script: chmod a+x $(System.DefaultWorkingDirectory)/.ci/scripts/merge/yuzubot-git-config.sh && $(System.DefaultWorkingDirectory)/.ci/scripts/merge/yuzubot-git-config.sh
displayName: 'Apply Git Configuration'
- script: git tag -a $(BuildName)-$(Build.BuildId) -m "yuzu $(BuildName) $(Build.BuildNumber) $(Build.DefinitionName)"
displayName: 'Tag Source'
- script: git remote add other $(GitRepoPushChangesURL)
displayName: 'Register Repository'
- script: git push --follow-tags --force other HEAD:$(GitPushBranch)
displayName: 'Update Code'
- script: git rev-list -n 1 $(BuildName)-$(Build.BuildId) > $(Build.ArtifactStagingDirectory)/tag-commit.sha
displayName: 'Calculate Release Point'
- task: PublishPipelineArtifact@1
displayName: 'Upload Release Point'
inputs:
targetPath: '$(Build.ArtifactStagingDirectory)/tag-commit.sha'
artifact: 'yuzu-$(BuildName)-release-point'
replaceExistingArchive: true

View File

@@ -0,0 +1,15 @@
parameters:
matchLabel: 'dummy-merge'
steps:
- script: mkdir $(System.DefaultWorkingDirectory)/patches && pip install requests urllib3
displayName: 'Prepare Environment'
- script: chmod a+x $(System.DefaultWorkingDirectory)/.ci/scripts/merge/yuzubot-git-config.sh && $(System.DefaultWorkingDirectory)/.ci/scripts/merge/yuzubot-git-config.sh
displayName: 'Apply Git Configuration'
- task: PythonScript@0
displayName: 'Discover, Download, and Apply Patches'
inputs:
scriptSource: 'filePath'
scriptPath: '.ci/scripts/merge/apply-patches-by-label.py'
arguments: '${{ parameters.matchLabel }} patches'
workingDirectory: '$(System.DefaultWorkingDirectory)'

29
.ci/templates/release.yml Normal file
View File

@@ -0,0 +1,29 @@
steps:
- task: DownloadPipelineArtifact@2
displayName: 'Download Windows Release'
inputs:
artifactName: 'yuzu-$(BuildName)-windows-mingw'
buildType: 'current'
targetPath: '$(Build.ArtifactStagingDirectory)'
- task: DownloadPipelineArtifact@2
displayName: 'Download Linux Release'
inputs:
artifactName: 'yuzu-$(BuildName)-linux'
buildType: 'current'
targetPath: '$(Build.ArtifactStagingDirectory)'
- task: DownloadPipelineArtifact@2
displayName: 'Download Release Point'
inputs:
artifactName: 'yuzu-$(BuildName)-release-point'
buildType: 'current'
targetPath: '$(Build.ArtifactStagingDirectory)'
- script: echo '##vso[task.setvariable variable=tagcommit]' && cat $(Build.ArtifactStagingDirectory)/tag-commit.sha
displayName: 'Calculate Release Point'
- task: GitHubRelease@0
inputs:
gitHubConnection: $(GitHubReleaseConnectionName)
repositoryName: '$(GitHubReleaseRepoName)'
action: 'create'
target: $(variables.tagcommit)
title: 'yuzu $(BuildName) #$(Build.BuildId)'
assets: '$(Build.ArtifactStagingDirectory)/*'

View File

@@ -0,0 +1,16 @@
steps:
- checkout: none
- task: DownloadPipelineArtifact@2
displayName: 'Download Source'
inputs:
artifactName: 'yuzu-$(BuildName)-source'
buildType: 'current'
targetPath: '$(Build.ArtifactStagingDirectory)'
- script: rm -rf $(System.DefaultWorkingDirectory) && mkdir $(System.DefaultWorkingDirectory)
displayName: 'Clean Working Directory'
- task: ExtractFiles@1
displayName: 'Prepare Source'
inputs:
archiveFilePatterns: '$(Build.ArtifactStagingDirectory)/*.7z'
destinationFolder: '$(System.DefaultWorkingDirectory)'
cleanDestinationFolder: false

View File

@@ -0,0 +1,11 @@
parameters:
needSubmodules: 'true'
steps:
- checkout: self
displayName: 'Checkout Recursive'
submodules: recursive
# condition: eq(parameters.needSubmodules, 'true')
#- checkout: self
# displayName: 'Checkout Fast'
# condition: ne(parameters.needSubmodules, 'true')

View File

@@ -0,0 +1,7 @@
steps:
- ${{ if eq(parameters.artifactSource, 'true') }}:
- template: ./retrieve-artifact-source.yml
- ${{ if ne(parameters.artifactSource, 'true') }}:
- template: ./retrieve-master-source.yml
parameters:
needSubmodules: $(parameters.needSubmodules)

23
.ci/yuzu-mainline.yml Normal file
View File

@@ -0,0 +1,23 @@
trigger:
- master
stages:
- stage: merge
displayName: 'merge'
jobs:
- template: ./templates/merge.yml
- stage: format
dependsOn: merge
displayName: 'format'
jobs:
- job: format
displayName: 'clang'
pool:
vmImage: ubuntu-latest
steps:
- template: ./templates/format-check.yml
- stage: build
displayName: 'build'
dependsOn: format
jobs:
- template: ./templates/build-standard.yml

19
.ci/yuzu-patreon.yml Normal file
View File

@@ -0,0 +1,19 @@
# Starter pipeline
# Start with a minimal pipeline that you can customize to build and deploy your code.
# Add steps that build, run tests, deploy, and more:
# https://aka.ms/yaml
trigger:
- master
pool:
vmImage: 'ubuntu-latest'
steps:
- script: echo Hello, world!
displayName: 'Run a one-line script'
- script: |
echo Add other tasks to build, test, and deploy your project.
echo See https://aka.ms/yaml
displayName: 'Run a multi-line script'

19
.ci/yuzu-repo-sync.yml Normal file
View File

@@ -0,0 +1,19 @@
trigger:
- master
jobs:
- job: copy
displayName: 'Sync Repository'
pool:
vmImage: 'ubuntu-latest'
steps:
- script: echo 'https://$(GitUsername):$(GitAccessToken)@dev.azure.com' > $HOME/.git-credentials
displayName: 'Load Credentials'
- script: git config --global credential.helper store
displayName: 'Register Credential Helper'
- script: git remote add other $(GitRepoPushChangesURL)
displayName: 'Register Repository'
- script: git push --force other HEAD:$(GitPushBranch)
displayName: 'Update Code'
- script: rm -rf $HOME/.git-credentials
displayName: 'Clear Cached Credentials'

18
.ci/yuzu-verify.yml Normal file
View File

@@ -0,0 +1,18 @@
stages:
- stage: format
displayName: 'format'
jobs:
- job: format
displayName: 'clang'
pool:
vmImage: ubuntu-latest
steps:
- template: ./templates/format-check.yml
parameters:
artifactSource: 'false'
- stage: build
displayName: 'build'
dependsOn: format
jobs:
- template: ./templates/build-standard.yml
- template: ./templates/build-testing.yml

View File

@@ -70,6 +70,7 @@ set(HASH_FILES
"${VIDEO_CORE}/shader/decode/half_set.cpp"
"${VIDEO_CORE}/shader/decode/half_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/hfma2.cpp"
"${VIDEO_CORE}/shader/decode/image.cpp"
"${VIDEO_CORE}/shader/decode/integer_set.cpp"
"${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/memory.cpp"
@@ -81,6 +82,8 @@ set(HASH_FILES
"${VIDEO_CORE}/shader/decode/shift.cpp"
"${VIDEO_CORE}/shader/decode/video.cpp"
"${VIDEO_CORE}/shader/decode/xmad.cpp"
"${VIDEO_CORE}/shader/control_flow.cpp"
"${VIDEO_CORE}/shader/control_flow.h"
"${VIDEO_CORE}/shader/decode.cpp"
"${VIDEO_CORE}/shader/node.h"
"${VIDEO_CORE}/shader/node_helper.cpp"

View File

@@ -73,13 +73,15 @@ private:
EffectInStatus info{};
};
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, AudioRendererParameter params,
Kernel::SharedPtr<Kernel::WritableEvent> buffer_event)
Kernel::SharedPtr<Kernel::WritableEvent> buffer_event,
std::size_t instance_number)
: worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count),
effects(params.effect_count) {
audio_out = std::make_unique<AudioCore::AudioOut>();
stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS,
"AudioRenderer", [=]() { buffer_event->Signal(); });
fmt::format("AudioRenderer-Instance{}", instance_number),
[=]() { buffer_event->Signal(); });
audio_out->StartStream(stream);
QueueMixedBuffer(0);
@@ -217,13 +219,15 @@ std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_co
if (offset == samples.size()) {
offset = 0;
if (!wave_buffer.is_looping) {
if (!wave_buffer.is_looping && wave_buffer.buffer_sz) {
SetWaveIndex(wave_index + 1);
}
out_status.wave_buffer_consumed++;
if (wave_buffer.buffer_sz) {
out_status.wave_buffer_consumed++;
}
if (wave_buffer.end_of_stream) {
if (wave_buffer.end_of_stream || wave_buffer.buffer_sz == 0) {
info.play_state = PlayState::Paused;
}
}

View File

@@ -215,7 +215,8 @@ static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size
class AudioRenderer {
public:
AudioRenderer(Core::Timing::CoreTiming& core_timing, AudioRendererParameter params,
Kernel::SharedPtr<Kernel::WritableEvent> buffer_event);
Kernel::SharedPtr<Kernel::WritableEvent> buffer_event,
std::size_t instance_number);
~AudioRenderer();
std::vector<u8> UpdateAudioRenderer(const std::vector<u8>& input_params);

View File

@@ -105,7 +105,7 @@ void Stream::PlayNextBuffer() {
sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());
core_timing.ScheduleEventThreadsafe(GetBufferReleaseCycles(*active_buffer), release_event, {});
core_timing.ScheduleEvent(GetBufferReleaseCycles(*active_buffer), release_event, {});
}
void Stream::ReleaseActiveBuffer() {

View File

@@ -44,6 +44,7 @@ add_custom_command(OUTPUT scm_rev.cpp
"${VIDEO_CORE}/shader/decode/half_set.cpp"
"${VIDEO_CORE}/shader/decode/half_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/hfma2.cpp"
"${VIDEO_CORE}/shader/decode/image.cpp"
"${VIDEO_CORE}/shader/decode/integer_set.cpp"
"${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/memory.cpp"
@@ -55,6 +56,8 @@ add_custom_command(OUTPUT scm_rev.cpp
"${VIDEO_CORE}/shader/decode/shift.cpp"
"${VIDEO_CORE}/shader/decode/video.cpp"
"${VIDEO_CORE}/shader/decode/xmad.cpp"
"${VIDEO_CORE}/shader/control_flow.cpp"
"${VIDEO_CORE}/shader/control_flow.h"
"${VIDEO_CORE}/shader/decode.cpp"
"${VIDEO_CORE}/shader/node.h"
"${VIDEO_CORE}/shader/node_helper.cpp"
@@ -74,6 +77,7 @@ add_library(common STATIC
assert.h
detached_tasks.cpp
detached_tasks.h
binary_find.h
bit_field.h
bit_util.h
cityhash.cpp

View File

@@ -3,6 +3,7 @@
#pragma once
#include <cstddef>
#include <memory>
#include <type_traits>
namespace Common {
@@ -19,6 +20,12 @@ constexpr T AlignDown(T value, std::size_t size) {
return static_cast<T>(value - value % size);
}
template <typename T>
constexpr T AlignBits(T value, std::size_t align) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
return static_cast<T>((value + ((1ULL << align) - 1)) >> align << align);
}
template <typename T>
constexpr bool Is4KBAligned(T value) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
@@ -31,4 +38,63 @@ constexpr bool IsWordAligned(T value) {
return (value & 0b11) == 0;
}
template <typename T, std::size_t Align = 16>
class AlignmentAllocator {
public:
using value_type = T;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using pointer = T*;
using const_pointer = const T*;
using reference = T&;
using const_reference = const T&;
public:
pointer address(reference r) noexcept {
return std::addressof(r);
}
const_pointer address(const_reference r) const noexcept {
return std::addressof(r);
}
pointer allocate(size_type n) {
return static_cast<pointer>(::operator new (n, std::align_val_t{Align}));
}
void deallocate(pointer p, size_type) {
::operator delete (p, std::align_val_t{Align});
}
void construct(pointer p, const value_type& wert) {
new (p) value_type(wert);
}
void destroy(pointer p) {
p->~value_type();
}
size_type max_size() const noexcept {
return size_type(-1) / sizeof(value_type);
}
template <typename T2>
struct rebind {
using other = AlignmentAllocator<T2, Align>;
};
bool operator!=(const AlignmentAllocator<T, Align>& other) const noexcept {
return !(*this == other);
}
// Returns true if and only if storage allocated from *this
// can be deallocated from other, and vice versa.
// Always returns true for stateless allocators.
bool operator==(const AlignmentAllocator<T, Align>& other) const noexcept {
return true;
}
};
} // namespace Common
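A minimal standalone sketch (hypothetical, not part of the diff, assuming C++17 aligned operator new) of how the new helpers behave: AlignBits rounds a value up to the next multiple of 2^align, and AlignmentAllocator routes container storage through aligned allocation, which is what the later PhysicalMemory change relies on.

// Hypothetical usage sketch, not taken from the repository.
#include <cstdint>
#include <vector>
#include "common/alignment.h"

int main() {
    // AlignBits(0x123, 8) rounds up to the next multiple of 2^8 = 256.
    static_assert(Common::AlignBits<std::uint64_t>(0x123, 8) == 0x200);

    // A vector backed by the aligned allocator; its storage goes through
    // ::operator new(n, std::align_val_t{256}) and lands on a 256-byte boundary.
    std::vector<std::uint8_t, Common::AlignmentAllocator<std::uint8_t, 256>> buffer(0x1000);
    return (reinterpret_cast<std::uintptr_t>(buffer.data()) % 256) == 0 ? 0 : 1;
}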

21
src/common/binary_find.h Normal file
View File

@@ -0,0 +1,21 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <algorithm>
#include <functional>
namespace Common {
template <class ForwardIt, class T, class Compare = std::less<>>
ForwardIt BinaryFind(ForwardIt first, ForwardIt last, const T& value, Compare comp = {}) {
// Note: BOTH the type T and the type obtained by dereferencing ForwardIt
// must be implicitly convertible to BOTH Type1 and Type2 used in Compare.
// This is stricter than the requirement imposed by std::lower_bound.
first = std::lower_bound(first, last, value, comp);
return first != last && !comp(value, *first) ? first : last;
}
} // namespace Common
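A small usage sketch (hypothetical, not from the diff): unlike std::lower_bound, which returns an insertion point, BinaryFind returns last when the value is absent, so the result can be compared against end() directly.

// Hypothetical usage sketch.
#include <vector>
#include "common/binary_find.h"

int main() {
    const std::vector<int> sorted{1, 3, 5, 7, 9};
    const auto hit = Common::BinaryFind(sorted.begin(), sorted.end(), 5);
    const auto miss = Common::BinaryFind(sorted.begin(), sorted.end(), 4);
    // hit points at the element 5; miss compares equal to sorted.end().
    return (hit != sorted.end() && *hit == 5 && miss == sorted.end()) ? 0 : 1;
}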

View File

@@ -97,4 +97,48 @@ inline u32 CountTrailingZeroes64(u64 value) {
}
#endif
#ifdef _MSC_VER
inline u32 MostSignificantBit32(const u32 value) {
unsigned long result;
_BitScanReverse(&result, value);
return static_cast<u32>(result);
}
inline u32 MostSignificantBit64(const u64 value) {
unsigned long result;
_BitScanReverse64(&result, value);
return static_cast<u32>(result);
}
#else
inline u32 MostSignificantBit32(const u32 value) {
return 31U - static_cast<u32>(__builtin_clz(value));
}
inline u32 MostSignificantBit64(const u64 value) {
return 63U - static_cast<u32>(__builtin_clzll(value));
}
#endif
inline u32 Log2Floor32(const u32 value) {
return MostSignificantBit32(value);
}
inline u32 Log2Ceil32(const u32 value) {
const u32 log2_f = Log2Floor32(value);
return log2_f + ((value ^ (1U << log2_f)) != 0U);
}
inline u32 Log2Floor64(const u64 value) {
return MostSignificantBit64(value);
}
inline u32 Log2Ceil64(const u64 value) {
const u64 log2_f = static_cast<u64>(Log2Floor64(value));
return static_cast<u32>(log2_f + ((value ^ (1ULL << log2_f)) != 0ULL));
}
} // namespace Common
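A quick sanity-check sketch (hypothetical) for the new bit helpers: Log2Floor32 is the index of the most significant set bit, and Log2Ceil32 adds one whenever the value is not an exact power of two. The helpers assume a non-zero input, since _BitScanReverse and __builtin_clz are undefined for zero.

// Hypothetical usage sketch.
#include <cassert>
#include "common/bit_util.h"

int main() {
    assert(Common::MostSignificantBit32(0x80000000u) == 31);
    assert(Common::Log2Floor32(31) == 4);  // 0b11111 -> highest set bit is bit 4
    assert(Common::Log2Ceil32(31) == 5);   // rounds up, since 31 is not a power of two
    assert(Common::Log2Ceil32(32) == 5);   // exact power of two is unchanged
    return 0;
}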

View File

@@ -4,6 +4,7 @@
#pragma once
#include <algorithm>
#include <string>
#if !defined(ARCHITECTURE_x86_64)

View File

@@ -175,6 +175,7 @@ add_library(core STATIC
hle/service/acc/acc_u0.h
hle/service/acc/acc_u1.cpp
hle/service/acc/acc_u1.h
hle/service/acc/errors.h
hle/service/acc/profile_manager.cpp
hle/service/acc/profile_manager.h
hle/service/am/am.cpp
@@ -207,6 +208,8 @@ add_library(core STATIC
hle/service/aoc/aoc_u.h
hle/service/apm/apm.cpp
hle/service/apm/apm.h
hle/service/apm/controller.cpp
hle/service/apm/controller.h
hle/service/apm/interface.cpp
hle/service/apm/interface.h
hle/service/audio/audctl.cpp
@@ -270,6 +273,7 @@ add_library(core STATIC
hle/service/filesystem/fsp_srv.h
hle/service/fgm/fgm.cpp
hle/service/fgm/fgm.h
hle/service/friend/errors.h
hle/service/friend/friend.cpp
hle/service/friend/friend.h
hle/service/friend/interface.cpp
@@ -291,6 +295,7 @@ add_library(core STATIC
hle/service/hid/irs.h
hle/service/hid/xcd.cpp
hle/service/hid/xcd.h
hle/service/hid/errors.h
hle/service/hid/controllers/controller_base.cpp
hle/service/hid/controllers/controller_base.h
hle/service/hid/controllers/debug_pad.cpp
@@ -429,6 +434,8 @@ add_library(core STATIC
hle/service/time/interface.h
hle/service/time/time.cpp
hle/service/time/time.h
hle/service/time/time_sharedmemory.cpp
hle/service/time/time_sharedmemory.h
hle/service/usb/usb.cpp
hle/service/usb/usb.h
hle/service/vi/display/vi_display.cpp

View File

@@ -44,13 +44,6 @@ public:
/// Step CPU by one instruction
virtual void Step() = 0;
/// Maps a backing memory region for the CPU
virtual void MapBackingMemory(VAddr address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) = 0;
/// Unmaps a region of memory that was previously mapped using MapBackingMemory
virtual void UnmapMemory(VAddr address, std::size_t size) = 0;
/// Clear all instruction cache
virtual void ClearInstructionCache() = 0;

View File

@@ -177,15 +177,6 @@ ARM_Dynarmic::ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor,
ARM_Dynarmic::~ARM_Dynarmic() = default;
void ARM_Dynarmic::MapBackingMemory(u64 address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) {
inner_unicorn.MapBackingMemory(address, size, memory, perms);
}
void ARM_Dynarmic::UnmapMemory(u64 address, std::size_t size) {
inner_unicorn.UnmapMemory(address, size);
}
void ARM_Dynarmic::SetPC(u64 pc) {
jit->SetPC(pc);
}

View File

@@ -23,9 +23,6 @@ public:
ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index);
~ARM_Dynarmic() override;
void MapBackingMemory(VAddr address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) override;
void UnmapMemory(u64 address, std::size_t size) override;
void SetPC(u64 pc) override;
u64 GetPC() const override;
u64 GetReg(int index) const override;

View File

@@ -50,11 +50,14 @@ static void CodeHook(uc_engine* uc, uint64_t address, uint32_t size, void* user_
static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int size, u64 value,
void* user_data) {
auto* const system = static_cast<System*>(user_data);
ARM_Interface::ThreadContext ctx{};
Core::CurrentArmInterface().SaveContext(ctx);
system->CurrentArmInterface().SaveContext(ctx);
ASSERT_MSG(false, "Attempted to read from unmapped memory: 0x{:X}, pc=0x{:X}, lr=0x{:X}", addr,
ctx.pc, ctx.cpu_registers[30]);
return {};
return false;
}
ARM_Unicorn::ARM_Unicorn(System& system) : system{system} {
@@ -65,7 +68,7 @@ ARM_Unicorn::ARM_Unicorn(System& system) : system{system} {
uc_hook hook{};
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_INTR, (void*)InterruptHook, this, 0, -1));
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_MEM_INVALID, (void*)UnmappedMemoryHook, this, 0, -1));
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_MEM_INVALID, (void*)UnmappedMemoryHook, &system, 0, -1));
if (GDBStub::IsServerEnabled()) {
CHECKED(uc_hook_add(uc, &hook, UC_HOOK_CODE, (void*)CodeHook, this, 0, -1));
last_bkpt_hit = false;
@@ -76,15 +79,6 @@ ARM_Unicorn::~ARM_Unicorn() {
CHECKED(uc_close(uc));
}
void ARM_Unicorn::MapBackingMemory(VAddr address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) {
CHECKED(uc_mem_map_ptr(uc, address, size, static_cast<u32>(perms), memory));
}
void ARM_Unicorn::UnmapMemory(VAddr address, std::size_t size) {
CHECKED(uc_mem_unmap(uc, address, size));
}
void ARM_Unicorn::SetPC(u64 pc) {
CHECKED(uc_reg_write(uc, UC_ARM64_REG_PC, &pc));
}

View File

@@ -18,9 +18,6 @@ public:
explicit ARM_Unicorn(System& system);
~ARM_Unicorn() override;
void MapBackingMemory(VAddr address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) override;
void UnmapMemory(VAddr address, std::size_t size) override;
void SetPC(u64 pc) override;
u64 GetPC() const override;
u64 GetReg(int index) const override;

View File

@@ -25,6 +25,7 @@
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/am/applets/applets.h"
#include "core/hle/service/apm/controller.h"
#include "core/hle/service/glue/manager.h"
#include "core/hle/service/service.h"
#include "core/hle/service/sm/sm.h"
@@ -143,7 +144,7 @@ struct System::Impl {
telemetry_session = std::make_unique<Core::TelemetrySession>();
service_manager = std::make_shared<Service::SM::ServiceManager>();
Service::Init(service_manager, system, *virtual_filesystem);
Service::Init(service_manager, system);
GDBStub::Init();
renderer = VideoCore::CreateRenderer(emu_window, system);
@@ -306,6 +307,9 @@ struct System::Impl {
/// Frontend applets
Service::AM::Applets::AppletManager applet_manager;
/// APM (Performance) services
Service::APM::Controller apm_controller{core_timing};
/// Glue services
Service::Glue::ARPManager arp_manager;
@@ -568,6 +572,14 @@ const Service::Glue::ARPManager& System::GetARPManager() const {
return impl->arp_manager;
}
Service::APM::Controller& System::GetAPMController() {
return impl->apm_controller;
}
const Service::APM::Controller& System::GetAPMController() const {
return impl->apm_controller;
}
System::ResultStatus System::Init(Frontend::EmuWindow& emu_window) {
return impl->Init(*this, emu_window);
}

View File

@@ -43,6 +43,10 @@ struct AppletFrontendSet;
class AppletManager;
} // namespace AM::Applets
namespace APM {
class Controller;
}
namespace Glue {
class ARPManager;
}
@@ -296,6 +300,10 @@ public:
const Service::Glue::ARPManager& GetARPManager() const;
Service::APM::Controller& GetAPMController();
const Service::APM::Controller& GetAPMController() const;
private:
System();
@@ -319,10 +327,6 @@ private:
static System s_instance;
};
inline ARM_Interface& CurrentArmInterface() {
return System::GetInstance().CurrentArmInterface();
}
inline Kernel::Process* CurrentProcess() {
return System::GetInstance().CurrentProcess();
}

View File

@@ -53,16 +53,12 @@ bool CpuBarrier::Rendezvous() {
Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier,
std::size_t core_index)
: cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} {
if (Settings::values.use_cpu_jit) {
#ifdef ARCHITECTURE_x86_64
arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index);
arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index);
#else
arm_interface = std::make_unique<ARM_Unicorn>(system);
LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
arm_interface = std::make_unique<ARM_Unicorn>(system);
LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
#endif
} else {
arm_interface = std::make_unique<ARM_Unicorn>(system);
}
scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface);
}
@@ -70,15 +66,12 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba
Cpu::~Cpu() = default;
std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(std::size_t num_cores) {
if (Settings::values.use_cpu_jit) {
#ifdef ARCHITECTURE_x86_64
return std::make_unique<DynarmicExclusiveMonitor>(num_cores);
return std::make_unique<DynarmicExclusiveMonitor>(num_cores);
#else
return nullptr; // TODO(merry): Passthrough exclusive monitor
// TODO(merry): Passthrough exclusive monitor
return nullptr;
#endif
} else {
return nullptr; // TODO(merry): Passthrough exclusive monitor
}
}
void Cpu::RunLoop(bool tight_loop) {

View File

@@ -56,12 +56,12 @@ void CoreTiming::Initialize() {
}
void CoreTiming::Shutdown() {
MoveEvents();
ClearPendingEvents();
UnregisterAllEvents();
}
EventType* CoreTiming::RegisterEvent(const std::string& name, TimedCallback callback) {
std::lock_guard guard{inner_mutex};
// check for existing type with same name.
// we want event type names to remain unique so that we can use them for serialization.
ASSERT_MSG(event_types.find(name) == event_types.end(),
@@ -82,6 +82,7 @@ void CoreTiming::UnregisterAllEvents() {
void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
ASSERT(event_type != nullptr);
std::lock_guard guard{inner_mutex};
const s64 timeout = GetTicks() + cycles_into_future;
// If this event needs to be scheduled before the next advance(), force one early
@@ -93,12 +94,8 @@ void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_ty
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
void CoreTiming::ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type,
u64 userdata) {
ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type});
}
void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
std::lock_guard guard{inner_mutex};
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
return e.type == event_type && e.userdata == userdata;
});
@@ -110,10 +107,6 @@ void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
}
}
void CoreTiming::UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata) {
unschedule_queue.Push(std::make_pair(event_type, userdata));
}
u64 CoreTiming::GetTicks() const {
u64 ticks = static_cast<u64>(global_timer);
if (!is_global_timer_sane) {
@@ -135,6 +128,7 @@ void CoreTiming::ClearPendingEvents() {
}
void CoreTiming::RemoveEvent(const EventType* event_type) {
std::lock_guard guard{inner_mutex};
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
[&](const Event& e) { return e.type == event_type; });
@@ -145,11 +139,6 @@ void CoreTiming::RemoveEvent(const EventType* event_type) {
}
}
void CoreTiming::RemoveNormalAndThreadsafeEvent(const EventType* event_type) {
MoveEvents();
RemoveEvent(event_type);
}
void CoreTiming::ForceExceptionCheck(s64 cycles) {
cycles = std::max<s64>(0, cycles);
if (downcount <= cycles) {
@@ -162,19 +151,8 @@ void CoreTiming::ForceExceptionCheck(s64 cycles) {
downcount = static_cast<int>(cycles);
}
void CoreTiming::MoveEvents() {
for (Event ev; ts_queue.Pop(ev);) {
ev.fifo_order = event_fifo_id++;
event_queue.emplace_back(std::move(ev));
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
}
void CoreTiming::Advance() {
MoveEvents();
for (std::pair<const EventType*, u64> ev; unschedule_queue.Pop(ev);) {
UnscheduleEvent(ev.first, ev.second);
}
std::unique_lock<std::mutex> guard(inner_mutex);
const int cycles_executed = slice_length - downcount;
global_timer += cycles_executed;
@@ -186,7 +164,9 @@ void CoreTiming::Advance() {
Event evt = std::move(event_queue.front());
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
event_queue.pop_back();
inner_mutex.unlock();
evt.type->callback(evt.userdata, global_timer - evt.time);
inner_mutex.lock();
}
is_global_timer_sane = false;

View File

@@ -6,6 +6,7 @@
#include <chrono>
#include <functional>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>
@@ -67,7 +68,7 @@ public:
///
EventType* RegisterEvent(const std::string& name, TimedCallback callback);
/// Unregisters all registered events thus far.
/// Unregisters all registered events thus far. Note: this is not thread-safe.
void UnregisterAllEvents();
/// After the first Advance, the slice lengths and the downcount will be reduced whenever an
@@ -76,20 +77,10 @@ public:
/// Scheduling from a callback will not update the downcount until the Advance() completes.
void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0);
/// This is to be called when outside of hle threads, such as the graphics thread, wants to
/// schedule things to be executed on the main thread.
///
/// @note This doesn't change slice_length and thus events scheduled by this might be
/// called with a delay of up to MAX_SLICE_LENGTH
void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type,
u64 userdata = 0);
void UnscheduleEvent(const EventType* event_type, u64 userdata);
void UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata);
/// We only permit one event of each type in the queue at a time.
void RemoveEvent(const EventType* event_type);
void RemoveNormalAndThreadsafeEvent(const EventType* event_type);
void ForceExceptionCheck(s64 cycles);
@@ -120,7 +111,6 @@ private:
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
void MoveEvents();
s64 global_timer = 0;
s64 idled_cycles = 0;
@@ -143,14 +133,9 @@ private:
// remain stable regardless of rehashes/resizing.
std::unordered_map<std::string, EventType> event_types;
// The queue for storing the events from other threads threadsafe until they will be added
// to the event_queue by the emu thread
Common::MPSCQueue<Event> ts_queue;
// The queue for unscheduling the events from other threads threadsafe
Common::MPSCQueue<std::pair<const EventType*, u64>> unschedule_queue;
EventType* ev_lost = nullptr;
std::mutex inner_mutex;
};
} // namespace Core::Timing

View File

@@ -35,9 +35,9 @@ enum class ContentRecordType : u8 {
Program = 1,
Data = 2,
Control = 3,
Manual = 4,
Legal = 5,
Patch = 6,
HtmlDocument = 4,
LegalInformation = 5,
DeltaFragment = 6,
};
struct ContentRecord {

View File

@@ -94,6 +94,10 @@ u64 ProgramMetadata::GetFilesystemPermissions() const {
return aci_file_access.permissions;
}
u32 ProgramMetadata::GetSystemResourceSize() const {
return npdm_header.system_resource_size;
}
const ProgramMetadata::KernelCapabilityDescriptors& ProgramMetadata::GetKernelCapabilities() const {
return aci_kernel_capabilities;
}

View File

@@ -58,6 +58,7 @@ public:
u32 GetMainThreadStackSize() const;
u64 GetTitleID() const;
u64 GetFilesystemPermissions() const;
u32 GetSystemResourceSize() const;
const KernelCapabilityDescriptors& GetKernelCapabilities() const;
void Print() const;
@@ -76,7 +77,8 @@ private:
u8 reserved_3;
u8 main_thread_priority;
u8 main_thread_cpu;
std::array<u8, 8> reserved_4;
std::array<u8, 4> reserved_4;
u32_le system_resource_size;
u32_le process_category;
u32_le main_stack_size;
std::array<u8, 0x10> application_name;

View File

@@ -99,7 +99,7 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
return ContentRecordType::Data;
case NCAContentType::Manual:
// TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
return ContentRecordType::Manual;
return ContentRecordType::HtmlDocument;
default:
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", static_cast<u8>(type));
}
@@ -397,8 +397,8 @@ InstallResult RegisteredCache::InstallEntry(const NSP& nsp, bool overwrite_if_ex
});
if (meta_iter == ncas.end()) {
LOG_ERROR(Loader, "The XCI you are attempting to install does not have a metadata NCA and "
"is therefore malformed. Double check your encryption keys.");
LOG_ERROR(Loader, "The file you are attempting to install does not have a metadata NCA and "
"is therefore malformed. Check your encryption keys.");
return InstallResult::ErrorMetaFailed;
}
@@ -415,6 +415,9 @@ InstallResult RegisteredCache::InstallEntry(const NSP& nsp, bool overwrite_if_ex
const auto cnmt_file = section0->GetFiles()[0];
const CNMT cnmt(cnmt_file);
for (const auto& record : cnmt.GetContentRecords()) {
// Ignore DeltaFragments, they are not useful to us
if (record.type == ContentRecordType::DeltaFragment)
continue;
const auto nca = GetNCAFromNSPForID(nsp, record.nca_id);
if (nca == nullptr)
return InstallResult::ErrorCopyFailed;

View File

@@ -248,10 +248,13 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
auto next_file = pfs->GetFile(fmt::format("{}.nca", id_string));
if (next_file == nullptr) {
LOG_WARNING(Service_FS,
"NCA with ID {}.nca is listed in content metadata, but cannot "
"be found in PFS. NSP appears to be corrupted.",
id_string);
if (rec.type != ContentRecordType::DeltaFragment) {
LOG_WARNING(Service_FS,
"NCA with ID {}.nca is listed in content metadata, but cannot "
"be found in PFS. NSP appears to be corrupted.",
id_string);
}
continue;
}

View File

@@ -8,6 +8,7 @@
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/physical_memory.h"
namespace Kernel {
@@ -77,7 +78,7 @@ struct CodeSet final {
}
/// The overall data that backs this code set.
std::vector<u8> memory;
Kernel::PhysicalMemory memory;
/// The segments that comprise this code set.
std::array<Segment, 3> segments;

View File

@@ -0,0 +1,19 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include "common/alignment.h"
namespace Kernel {
// This encapsulation serves two purposes:
// - First, to encapsulate host physical memory under a single type and set a
// standard for managing it.
// - Second, to ensure all host backing memory used is aligned to 256 bytes due
// to strict alignment restrictions on GPU memory.
using PhysicalMemory = std::vector<u8, Common::AlignmentAllocator<u8, 256>>;
} // namespace Kernel
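Because PhysicalMemory is just std::vector<u8> with the aligned allocator, the later call-site changes in this set (process, shared memory, code sets) are mechanical: construction and the vector API stay the same, only the element allocator changes. A hedged sketch of the pattern, with the function name invented for illustration:

// Hypothetical sketch of the call-site pattern; MakeBackingBlock is not a real function.
#include <cstddef>
#include <memory>
#include "core/hle/kernel/physical_memory.h"

std::shared_ptr<Kernel::PhysicalMemory> MakeBackingBlock(std::size_t size) {
    // Same shape as the previous std::make_shared<std::vector<u8>>(size) calls,
    // but the element storage now comes from the 256-byte-aligned allocator.
    return std::make_shared<Kernel::PhysicalMemory>(size);
}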

View File

@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <algorithm>
#include <bitset>
#include <memory>
#include <random>
#include "common/alignment.h"
@@ -48,8 +49,58 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) {
}
} // Anonymous namespace
SharedPtr<Process> Process::Create(Core::System& system, std::string name,
Process::ProcessType type) {
// Represents a page used for thread-local storage.
//
// Each TLS page contains slots that may be used by processes and threads.
// Every process and thread is created with a slot in some arbitrary page
// (whichever page happens to have an available slot).
class TLSPage {
public:
static constexpr std::size_t num_slot_entries = Memory::PAGE_SIZE / Memory::TLS_ENTRY_SIZE;
explicit TLSPage(VAddr address) : base_address{address} {}
bool HasAvailableSlots() const {
return !is_slot_used.all();
}
VAddr GetBaseAddress() const {
return base_address;
}
std::optional<VAddr> ReserveSlot() {
for (std::size_t i = 0; i < is_slot_used.size(); i++) {
if (is_slot_used[i]) {
continue;
}
is_slot_used[i] = true;
return base_address + (i * Memory::TLS_ENTRY_SIZE);
}
return std::nullopt;
}
void ReleaseSlot(VAddr address) {
// Ensure that all given addresses are consistent with how TLS pages
// are intended to be used when releasing slots.
ASSERT(IsWithinPage(address));
ASSERT((address % Memory::TLS_ENTRY_SIZE) == 0);
const std::size_t index = (address - base_address) / Memory::TLS_ENTRY_SIZE;
is_slot_used[index] = false;
}
private:
bool IsWithinPage(VAddr address) const {
return base_address <= address && address < base_address + Memory::PAGE_SIZE;
}
VAddr base_address;
std::bitset<num_slot_entries> is_slot_used;
};
SharedPtr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) {
auto& kernel = system.Kernel();
SharedPtr<Process> process(new Process(system));
@@ -78,20 +129,17 @@ u64 Process::GetTotalPhysicalMemoryAvailable() const {
return vm_manager.GetTotalPhysicalMemoryAvailable();
}
u64 Process::GetTotalPhysicalMemoryAvailableWithoutMmHeap() const {
// TODO: Subtract the personal heap size from this when the
// personal heap is implemented.
return GetTotalPhysicalMemoryAvailable();
u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
}
u64 Process::GetTotalPhysicalMemoryUsed() const {
return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size +
GetSystemResourceUsage();
}
u64 Process::GetTotalPhysicalMemoryUsedWithoutMmHeap() const {
// TODO: Subtract the personal heap size from this when the
// personal heap is implemented.
return GetTotalPhysicalMemoryUsed();
u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}
void Process::RegisterThread(const Thread* thread) {
@@ -121,6 +169,7 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
program_id = metadata.GetTitleID();
ideal_core = metadata.GetMainThreadCore();
is_64bit_process = metadata.Is64BitProgram();
system_resource_size = metadata.GetSystemResourceSize();
vm_manager.Reset(metadata.GetAddressSpaceType());
@@ -135,19 +184,11 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
}
void Process::Run(s32 main_thread_priority, u64 stack_size) {
// The kernel always ensures that the given stack size is page aligned.
main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
// Allocate and map the main thread stack
// TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
// of the user address space.
const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
vm_manager
.MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size),
0, main_thread_stack_size, MemoryState::Stack)
.Unwrap();
AllocateMainThreadStack(stack_size);
tls_region_address = CreateTLSRegion();
vm_manager.LogLayout();
ChangeStatus(ProcessStatus::Running);
SetupMainThread(*this, kernel, main_thread_priority);
@@ -177,69 +218,66 @@ void Process::PrepareForTermination() {
stop_threads(system.Scheduler(2).GetThreadList());
stop_threads(system.Scheduler(3).GetThreadList());
FreeTLSRegion(tls_region_address);
tls_region_address = 0;
ChangeStatus(ProcessStatus::Exited);
}
/**
* Finds a free location for the TLS section of a thread.
* @param tls_slots The TLS page array of the thread's owner process.
* Returns a tuple of (page, slot, alloc_needed) where:
* page: The index of the first allocated TLS page that has free slots.
* slot: The index of the first free slot in the indicated page.
* alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
* Attempts to find a TLS page that contains a free slot for
* use by a thread.
*
* @returns If a page with an available slot is found, then an iterator
* pointing to the page is returned. Otherwise the end iterator
* is returned instead.
*/
static std::tuple<std::size_t, std::size_t, bool> FindFreeThreadLocalSlot(
const std::vector<std::bitset<8>>& tls_slots) {
// Iterate over all the allocated pages, and try to find one where not all slots are used.
for (std::size_t page = 0; page < tls_slots.size(); ++page) {
const auto& page_tls_slots = tls_slots[page];
if (!page_tls_slots.all()) {
// We found a page with at least one free slot, find which slot it is
for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
if (!page_tls_slots.test(slot)) {
return std::make_tuple(page, slot, false);
}
}
}
}
return std::make_tuple(0, 0, true);
static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
return std::find_if(tls_pages.begin(), tls_pages.end(),
[](const auto& page) { return page.HasAvailableSlots(); });
}
VAddr Process::MarkNextAvailableTLSSlotAsUsed(Thread& thread) {
auto [available_page, available_slot, needs_allocation] = FindFreeThreadLocalSlot(tls_slots);
const VAddr tls_begin = vm_manager.GetTLSIORegionBaseAddress();
VAddr Process::CreateTLSRegion() {
auto tls_page_iter = FindTLSPageWithAvailableSlots(tls_pages);
if (needs_allocation) {
tls_slots.emplace_back(0); // The page is completely available at the start
available_page = tls_slots.size() - 1;
available_slot = 0; // Use the first slot in the new page
if (tls_page_iter == tls_pages.cend()) {
const auto region_address =
vm_manager.FindFreeRegion(vm_manager.GetTLSIORegionBaseAddress(),
vm_manager.GetTLSIORegionEndAddress(), Memory::PAGE_SIZE);
ASSERT(region_address.Succeeded());
// Allocate some memory from the end of the linear heap for this region.
auto& tls_memory = thread.GetTLSMemory();
tls_memory->insert(tls_memory->end(), Memory::PAGE_SIZE, 0);
const auto map_result = vm_manager.MapMemoryBlock(
*region_address, std::make_shared<PhysicalMemory>(Memory::PAGE_SIZE), 0,
Memory::PAGE_SIZE, MemoryState::ThreadLocal);
ASSERT(map_result.Succeeded());
vm_manager.RefreshMemoryBlockMappings(tls_memory.get());
tls_pages.emplace_back(*region_address);
vm_manager.MapMemoryBlock(tls_begin + available_page * Memory::PAGE_SIZE, tls_memory, 0,
Memory::PAGE_SIZE, MemoryState::ThreadLocal);
const auto reserve_result = tls_pages.back().ReserveSlot();
ASSERT(reserve_result.has_value());
return *reserve_result;
}
tls_slots[available_page].set(available_slot);
return tls_begin + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE;
return *tls_page_iter->ReserveSlot();
}
void Process::FreeTLSSlot(VAddr tls_address) {
const VAddr tls_base = tls_address - vm_manager.GetTLSIORegionBaseAddress();
const VAddr tls_page = tls_base / Memory::PAGE_SIZE;
const VAddr tls_slot = (tls_base % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
void Process::FreeTLSRegion(VAddr tls_address) {
const VAddr aligned_address = Common::AlignDown(tls_address, Memory::PAGE_SIZE);
auto iter =
std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
return page.GetBaseAddress() == aligned_address;
});
tls_slots[tls_page].reset(tls_slot);
// Something has gone very wrong if we're freeing a region
// with no actual page available.
ASSERT(iter != tls_pages.cend());
iter->ReleaseSlot(tls_address);
}
void Process::LoadModule(CodeSet module_, VAddr base_addr) {
const auto memory = std::make_shared<std::vector<u8>>(std::move(module_.memory));
const auto memory = std::make_shared<PhysicalMemory>(std::move(module_.memory));
const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
MemoryState memory_state) {
@@ -282,4 +320,16 @@ void Process::ChangeStatus(ProcessStatus new_status) {
WakeupAllWaitingThreads();
}
void Process::AllocateMainThreadStack(u64 stack_size) {
// The kernel always ensures that the given stack size is page aligned.
main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
// Allocate and map the main thread stack
const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
vm_manager
.MapMemoryBlock(mapping_address, std::make_shared<PhysicalMemory>(main_thread_stack_size),
0, main_thread_stack_size, MemoryState::Stack)
.Unwrap();
}
} // namespace Kernel
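The TLS bookkeeping above boils down to simple slot arithmetic: each 0x1000-byte page holds 0x1000 / 0x200 = 8 thread-local regions, ReserveSlot converts a free slot index into an address, and ReleaseSlot inverts that mapping. A hedged standalone sketch of that arithmetic (the constants mirror Memory::PAGE_SIZE and Memory::TLS_ENTRY_SIZE; the helper names are invented):

// Hypothetical sketch of the TLS slot arithmetic used by TLSPage.
#include <cstddef>
#include <cstdint>

constexpr std::size_t kPageSize = 0x1000;     // stands in for Memory::PAGE_SIZE
constexpr std::size_t kTlsEntrySize = 0x200;  // stands in for Memory::TLS_ENTRY_SIZE
constexpr std::size_t kSlotsPerPage = kPageSize / kTlsEntrySize; // 8 slots per page

constexpr std::uint64_t SlotToAddress(std::uint64_t page_base, std::size_t slot) {
    return page_base + slot * kTlsEntrySize; // what ReserveSlot returns
}

constexpr std::size_t AddressToSlot(std::uint64_t page_base, std::uint64_t address) {
    return static_cast<std::size_t>((address - page_base) / kTlsEntrySize); // what ReleaseSlot computes
}

static_assert(kSlotsPerPage == 8);
static_assert(AddressToSlot(0x10000, SlotToAddress(0x10000, 5)) == 5);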

View File

@@ -5,7 +5,6 @@
#pragma once
#include <array>
#include <bitset>
#include <cstddef>
#include <list>
#include <string>
@@ -32,6 +31,7 @@ namespace Kernel {
class KernelCore;
class ResourceLimit;
class Thread;
class TLSPage;
struct CodeSet;
@@ -135,6 +135,11 @@ public:
return mutex;
}
/// Gets the address to the process' dedicated TLS region.
VAddr GetTLSRegionAddress() const {
return tls_region_address;
}
/// Gets the current status of the process
ProcessStatus GetStatus() const {
return status;
@@ -168,8 +173,24 @@ public:
return capabilities.GetPriorityMask();
}
u32 IsVirtualMemoryEnabled() const {
return is_virtual_address_memory_enabled;
/// Gets the amount of secure memory to allocate for memory management.
u32 GetSystemResourceSize() const {
return system_resource_size;
}
/// Gets the amount of secure memory currently in use for memory management.
u32 GetSystemResourceUsage() const {
// On hardware, this returns the amount of system resource memory that has
// been used by the kernel. This is problematic for Yuzu to emulate, because
// system resource memory is used for page tables -- and yuzu doesn't really
// have a way to calculate how much memory is required for page tables for
// the current process at any given time.
// TODO: Is this even worth implementing? Games may retrieve this value via
// an SDK function that gets used + available system resource size for debug
// or diagnostic purposes. However, it seems unlikely that a game would make
// decisions based on how much system memory is dedicated to its page tables.
// Is returning a value other than zero wise?
return 0;
}
/// Whether this process is an AArch64 or AArch32 process.
@@ -196,15 +217,15 @@ public:
u64 GetTotalPhysicalMemoryAvailable() const;
/// Retrieves the total physical memory available to this process in bytes,
/// without the size of the personal heap added to it.
u64 GetTotalPhysicalMemoryAvailableWithoutMmHeap() const;
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;
/// Retrieves the total physical memory used by this process in bytes.
u64 GetTotalPhysicalMemoryUsed() const;
/// Retrieves the total physical memory used by this process in bytes,
/// without the size of the personal heap added to it.
u64 GetTotalPhysicalMemoryUsedWithoutMmHeap() const;
/// without the size of the personal system resource heap added to it.
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
/// Gets the list of all threads created with this process as their owner.
const std::list<const Thread*>& GetThreadList() const {
@@ -260,10 +281,10 @@ public:
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
VAddr MarkNextAvailableTLSSlotAsUsed(Thread& thread);
[[nodiscard]] VAddr CreateTLSRegion();
// Frees a used TLS slot identified by the given address
void FreeTLSSlot(VAddr tls_address);
void FreeTLSRegion(VAddr tls_address);
private:
explicit Process(Core::System& system);
@@ -280,6 +301,9 @@ private:
/// a process signal.
void ChangeStatus(ProcessStatus new_status);
/// Allocates the main thread stack for the process, given the stack size in bytes.
void AllocateMainThreadStack(u64 stack_size);
/// Memory manager for this process.
Kernel::VMManager vm_manager;
@@ -290,7 +314,7 @@ private:
u64 code_memory_size = 0;
/// Current status of the process
ProcessStatus status;
ProcessStatus status{};
/// The ID of this process
u64 process_id = 0;
@@ -298,19 +322,23 @@ private:
/// Title ID corresponding to the process
u64 program_id = 0;
/// Specifies additional memory to be reserved for the process's memory management by the
/// system. When this is non-zero, secure memory is allocated and used for page table allocation
/// instead of using the normal global page tables/memory block management.
u32 system_resource_size = 0;
/// Resource limit descriptor for this process
SharedPtr<ResourceLimit> resource_limit;
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;
u32 is_virtual_address_memory_enabled = 0;
/// The Thread Local Storage area is allocated as processes create threads,
/// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
/// holds the TLS for a specific thread. This vector contains which parts are in use for each
/// page as a bitmask.
/// This vector will grow as more pages are allocated for new threads.
std::vector<std::bitset<8>> tls_slots;
std::vector<TLSPage> tls_pages;
/// Contains the parsed process capability descriptors.
ProcessCapabilities capabilities;
@@ -338,8 +366,11 @@ private:
/// variable related facilities.
Mutex mutex;
/// Address indicating the location of the process' dedicated TLS region.
VAddr tls_region_address = 0;
/// Random values for svcGetInfo RandomEntropy
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
/// List of threads that are running with this process as their owner.
std::list<const Thread*> thread_list;

View File

@@ -28,7 +28,7 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_
shared_memory->other_permissions = other_permissions;
if (address == 0) {
shared_memory->backing_block = std::make_shared<std::vector<u8>>(size);
shared_memory->backing_block = std::make_shared<Kernel::PhysicalMemory>(size);
shared_memory->backing_block_offset = 0;
// Refresh the address mappings for the current process.
@@ -59,8 +59,8 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_
}
SharedPtr<SharedMemory> SharedMemory::CreateForApplet(
KernelCore& kernel, std::shared_ptr<std::vector<u8>> heap_block, std::size_t offset, u64 size,
MemoryPermission permissions, MemoryPermission other_permissions, std::string name) {
KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset,
u64 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name) {
SharedPtr<SharedMemory> shared_memory(new SharedMemory(kernel));
shared_memory->owner_process = nullptr;

View File

@@ -10,6 +10,7 @@
#include "common/common_types.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/physical_memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/result.h"
@@ -62,12 +63,10 @@ public:
* block.
* @param name Optional object name, used for debugging purposes.
*/
static SharedPtr<SharedMemory> CreateForApplet(KernelCore& kernel,
std::shared_ptr<std::vector<u8>> heap_block,
std::size_t offset, u64 size,
MemoryPermission permissions,
MemoryPermission other_permissions,
std::string name = "Unknown Applet");
static SharedPtr<SharedMemory> CreateForApplet(
KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset,
u64 size, MemoryPermission permissions, MemoryPermission other_permissions,
std::string name = "Unknown Applet");
std::string GetTypeName() const override {
return "SharedMemory";
@@ -135,7 +134,7 @@ private:
~SharedMemory() override;
/// Backing memory for this shared memory block.
std::shared_ptr<std::vector<u8>> backing_block;
std::shared_ptr<PhysicalMemory> backing_block;
/// Offset into the backing block for this shared memory.
std::size_t backing_block_offset = 0;
/// Size of the memory block. Page-aligned.

View File

@@ -98,9 +98,9 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add
return ERR_INVALID_ADDRESS_STATE;
}
if (!vm_manager.IsWithinNewMapRegion(dst_addr, size)) {
if (!vm_manager.IsWithinStackRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
"Destination is not within the new map region, addr=0x{:016X}, size=0x{:016X}",
"Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
return ERR_INVALID_MEMORY_RANGE;
}
@@ -318,7 +318,14 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
return result;
}
return vm_manager.UnmapRange(dst_addr, size);
const auto unmap_res = vm_manager.UnmapRange(dst_addr, size);
// Reprotect the source mapping on success
if (unmap_res.IsSuccess()) {
ASSERT(vm_manager.ReprotectRange(src_addr, size, VMAPermission::ReadWrite).IsSuccess());
}
return unmap_res;
}
/// Connect to an OS service given the port name, returns the handle to the port to out
@@ -726,19 +733,19 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
// 2.0.0+
ASLRRegionBaseAddr = 12,
ASLRRegionSize = 13,
NewMapRegionBaseAddr = 14,
NewMapRegionSize = 15,
StackRegionBaseAddr = 14,
StackRegionSize = 15,
// 3.0.0+
IsVirtualAddressMemoryEnabled = 16,
PersonalMmHeapUsage = 17,
SystemResourceSize = 16,
SystemResourceUsage = 17,
TitleId = 18,
// 4.0.0+
PrivilegedProcessId = 19,
// 5.0.0+
UserExceptionContextAddr = 20,
// 6.0.0+
TotalPhysicalMemoryAvailableWithoutMmHeap = 21,
TotalPhysicalMemoryUsedWithoutMmHeap = 22,
TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
TotalPhysicalMemoryUsedWithoutSystemResource = 22,
};
const auto info_id_type = static_cast<GetInfoType>(info_id);
@@ -752,16 +759,16 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
case GetInfoType::HeapRegionSize:
case GetInfoType::ASLRRegionBaseAddr:
case GetInfoType::ASLRRegionSize:
case GetInfoType::NewMapRegionBaseAddr:
case GetInfoType::NewMapRegionSize:
case GetInfoType::StackRegionBaseAddr:
case GetInfoType::StackRegionSize:
case GetInfoType::TotalPhysicalMemoryAvailable:
case GetInfoType::TotalPhysicalMemoryUsed:
case GetInfoType::IsVirtualAddressMemoryEnabled:
case GetInfoType::PersonalMmHeapUsage:
case GetInfoType::SystemResourceSize:
case GetInfoType::SystemResourceUsage:
case GetInfoType::TitleId:
case GetInfoType::UserExceptionContextAddr:
case GetInfoType::TotalPhysicalMemoryAvailableWithoutMmHeap:
case GetInfoType::TotalPhysicalMemoryUsedWithoutMmHeap: {
case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: {
if (info_sub_id != 0) {
return ERR_INVALID_ENUM_VALUE;
}
@@ -806,12 +813,12 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
*result = process->VMManager().GetASLRRegionSize();
return RESULT_SUCCESS;
case GetInfoType::NewMapRegionBaseAddr:
*result = process->VMManager().GetNewMapRegionBaseAddress();
case GetInfoType::StackRegionBaseAddr:
*result = process->VMManager().GetStackRegionBaseAddress();
return RESULT_SUCCESS;
case GetInfoType::NewMapRegionSize:
*result = process->VMManager().GetNewMapRegionSize();
case GetInfoType::StackRegionSize:
*result = process->VMManager().GetStackRegionSize();
return RESULT_SUCCESS;
case GetInfoType::TotalPhysicalMemoryAvailable:
@@ -822,8 +829,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
*result = process->GetTotalPhysicalMemoryUsed();
return RESULT_SUCCESS;
case GetInfoType::IsVirtualAddressMemoryEnabled:
*result = process->IsVirtualMemoryEnabled();
case GetInfoType::SystemResourceSize:
*result = process->GetSystemResourceSize();
return RESULT_SUCCESS;
case GetInfoType::SystemResourceUsage:
LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
*result = process->GetSystemResourceUsage();
return RESULT_SUCCESS;
case GetInfoType::TitleId:
@@ -831,17 +843,15 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
return RESULT_SUCCESS;
case GetInfoType::UserExceptionContextAddr:
LOG_WARNING(Kernel_SVC,
"(STUBBED) Attempted to query user exception context address, returned 0");
*result = 0;
*result = process->GetTLSRegionAddress();
return RESULT_SUCCESS;
case GetInfoType::TotalPhysicalMemoryAvailableWithoutMmHeap:
*result = process->GetTotalPhysicalMemoryAvailable();
case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
*result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
return RESULT_SUCCESS;
case GetInfoType::TotalPhysicalMemoryUsedWithoutMmHeap:
*result = process->GetTotalPhysicalMemoryUsedWithoutMmHeap();
case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource:
*result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
return RESULT_SUCCESS;
default:
@@ -946,6 +956,86 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
}
}
/// Maps memory at a desired address
static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
return ERR_INVALID_ADDRESS;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
return ERR_INVALID_SIZE;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is zero");
return ERR_INVALID_SIZE;
}
if (!(addr < addr + size)) {
LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
return ERR_INVALID_MEMORY_RANGE;
}
Process* const current_process = system.Kernel().CurrentProcess();
auto& vm_manager = current_process->VMManager();
if (current_process->GetSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
return ERR_INVALID_STATE;
}
if (!vm_manager.IsWithinMapRegion(addr, size)) {
LOG_ERROR(Kernel_SVC, "Range not within map region");
return ERR_INVALID_MEMORY_RANGE;
}
return vm_manager.MapPhysicalMemory(addr, size);
}
/// Unmaps memory previously mapped via MapPhysicalMemory
static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
return ERR_INVALID_ADDRESS;
}
if (!Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
return ERR_INVALID_SIZE;
}
if (size == 0) {
LOG_ERROR(Kernel_SVC, "Size is zero");
return ERR_INVALID_SIZE;
}
if (!(addr < addr + size)) {
LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
return ERR_INVALID_MEMORY_RANGE;
}
Process* const current_process = system.Kernel().CurrentProcess();
auto& vm_manager = current_process->VMManager();
if (current_process->GetSystemResourceSize() == 0) {
LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
return ERR_INVALID_STATE;
}
if (!vm_manager.IsWithinMapRegion(addr, size)) {
LOG_ERROR(Kernel_SVC, "Range not within map region");
return ERR_INVALID_MEMORY_RANGE;
}
return vm_manager.UnmapPhysicalMemory(addr, size);
}
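
Both new handlers share the same front-end validation before dispatching to the VMManager. A minimal sketch of that shared pattern, factored into a hypothetical helper (the helper name is not part of the change; the checks mirror the code above):

// Sketch only: hypothetical helper mirroring the argument checks performed by
// MapPhysicalMemory/UnmapPhysicalMemory above.
static ResultCode ValidatePhysicalMemoryRange(VAddr addr, u64 size) {
    if (!Common::Is4KBAligned(addr)) {
        return ERR_INVALID_ADDRESS;
    }
    if (size == 0 || !Common::Is4KBAligned(size)) {
        return ERR_INVALID_SIZE;
    }
    if (!(addr < addr + size)) {
        // addr + size wrapped around the 64-bit address space.
        return ERR_INVALID_MEMORY_RANGE;
    }
    return RESULT_SUCCESS;
}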
/// Sets the thread activity
static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
@@ -1647,8 +1737,8 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
// Wait for an address (via Address Arbiter)
static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
s64 timeout) {
LOG_WARNING(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}",
address, type, value, timeout);
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
type, value, timeout);
// If the passed address is a kernel virtual address, return invalid memory state.
if (Memory::IsKernelVirtualAddress(address)) {
@@ -1670,8 +1760,8 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
// Signals to an address (via Address Arbiter)
static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
s32 num_to_wake) {
LOG_WARNING(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
address, type, value, num_to_wake);
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
address, type, value, num_to_wake);
// If the passed address is a kernel virtual address, return invalid memory state.
if (Memory::IsKernelVirtualAddress(address)) {
@@ -2303,8 +2393,8 @@ static const FunctionDef SVC_Table[] = {
{0x29, SvcWrap<GetInfo>, "GetInfo"},
{0x2A, nullptr, "FlushEntireDataCache"},
{0x2B, nullptr, "FlushDataCache"},
{0x2C, nullptr, "MapPhysicalMemory"},
{0x2D, nullptr, "UnmapPhysicalMemory"},
{0x2C, SvcWrap<MapPhysicalMemory>, "MapPhysicalMemory"},
{0x2D, SvcWrap<UnmapPhysicalMemory>, "UnmapPhysicalMemory"},
{0x2E, nullptr, "GetFutureThreadInfo"},
{0x2F, nullptr, "GetLastThreadInfo"},
{0x30, SvcWrap<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"},

View File

@@ -32,6 +32,11 @@ void SvcWrap(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0)).raw);
}
template <ResultCode func(Core::System&, u64, u64)>
void SvcWrap(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw);
}
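
This two-argument overload is the one instantiated by the new SvcWrap<MapPhysicalMemory> and SvcWrap<UnmapPhysicalMemory> table entries; a rough, purely illustrative expansion of one instantiation using the helpers visible above:

// Illustrative expansion only, not actual added code.
void SvcWrap_MapPhysicalMemory(Core::System& system) {
    FuncReturn(system, MapPhysicalMemory(system, Param(system, 0), Param(system, 1)).raw);
}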
template <ResultCode func(Core::System&, u32)>
void SvcWrap(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);

View File

@@ -65,7 +65,7 @@ void Thread::Stop() {
owner_process->UnregisterThread(this);
// Mark the TLS slot in the thread's page as free.
owner_process->FreeTLSSlot(tls_address);
owner_process->FreeTLSRegion(tls_address);
}
void Thread::WakeAfterDelay(s64 nanoseconds) {
@@ -76,13 +76,13 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
// This function might be called from any thread so we have to be cautious and use the
// thread-safe version of ScheduleEvent.
const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
Core::System::GetInstance().CoreTiming().ScheduleEventThreadsafe(
Core::System::GetInstance().CoreTiming().ScheduleEvent(
cycles, kernel.ThreadWakeupCallbackEventType(), callback_handle);
}
void Thread::CancelWakeupTimer() {
Core::System::GetInstance().CoreTiming().UnscheduleEventThreadsafe(
kernel.ThreadWakeupCallbackEventType(), callback_handle);
Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
callback_handle);
}
static std::optional<s32> GetNextProcessorId(u64 mask) {
@@ -205,9 +205,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->name = std::move(name);
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
thread->owner_process = &owner_process;
thread->tls_address = thread->owner_process->CreateTLSRegion();
thread->scheduler = &system.Scheduler(processor_id);
thread->scheduler->AddThread(thread);
thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
thread->owner_process->RegisterThread(thread.get());

View File

@@ -5,7 +5,6 @@
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <vector>
@@ -78,9 +77,6 @@ enum class ThreadActivity : u32 {
class Thread final : public WaitObject {
public:
using TLSMemory = std::vector<u8>;
using TLSMemoryPtr = std::shared_ptr<TLSMemory>;
using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
using ThreadContext = Core::ARM_Interface::ThreadContext;
@@ -169,14 +165,6 @@ public:
return thread_id;
}
TLSMemoryPtr& GetTLSMemory() {
return tls_memory;
}
const TLSMemoryPtr& GetTLSMemory() const {
return tls_memory;
}
/// Resumes a thread from waiting
void ResumeFromWait();
@@ -463,11 +451,9 @@ private:
u32 ideal_core{0xFFFFFFFF};
u64 affinity_mask{0x1};
TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>();
ThreadActivity activity = ThreadActivity::Normal;
std::string name;
ThreadActivity activity = ThreadActivity::Normal;
};
/**

View File

@@ -47,7 +47,7 @@ ResultCode TransferMemory::MapMemory(VAddr address, u64 size, MemoryPermission p
return ERR_INVALID_STATE;
}
backing_block = std::make_shared<std::vector<u8>>(size);
backing_block = std::make_shared<PhysicalMemory>(size);
const auto map_state = owner_permissions == MemoryPermission::None
? MemoryState::TransferMemoryIsolated

View File

@@ -8,6 +8,7 @@
#include <vector>
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/physical_memory.h"
union ResultCode;
@@ -82,7 +83,7 @@ private:
~TransferMemory() override;
/// Memory block backing this instance.
std::shared_ptr<std::vector<u8>> backing_block;
std::shared_ptr<PhysicalMemory> backing_block;
/// The base address for the memory managed by this instance.
VAddr base_address = 0;

View File

@@ -5,13 +5,15 @@
#include <algorithm>
#include <iterator>
#include <utility>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/memory_hook.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
#include "core/memory_setup.h"
@@ -49,10 +51,14 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
type != next.type) {
return false;
}
if (type == VMAType::AllocatedMemoryBlock &&
(backing_block != next.backing_block || offset + size != next.offset)) {
if ((attribute & MemoryAttribute::DeviceMapped) == MemoryAttribute::DeviceMapped) {
// TODO: Can device mapped memory be merged sanely?
// Not merging it may cause inaccuracies versus hardware when memory layout is queried.
return false;
}
if (type == VMAType::AllocatedMemoryBlock) {
return true;
}
if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
return false;
}
@@ -98,9 +104,9 @@ bool VMManager::IsValidHandle(VMAHandle handle) const {
}
ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
std::shared_ptr<std::vector<u8>> block,
std::shared_ptr<PhysicalMemory> block,
std::size_t offset, u64 size,
MemoryState state) {
MemoryState state, VMAPermission perm) {
ASSERT(block != nullptr);
ASSERT(offset + size <= block->size());
@@ -109,17 +115,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
VirtualMemoryArea& final_vma = vma_handle->second;
ASSERT(final_vma.size == size);
system.ArmInterface(0).MapBackingMemory(target, size, block->data() + offset,
VMAPermission::ReadWriteExecute);
system.ArmInterface(1).MapBackingMemory(target, size, block->data() + offset,
VMAPermission::ReadWriteExecute);
system.ArmInterface(2).MapBackingMemory(target, size, block->data() + offset,
VMAPermission::ReadWriteExecute);
system.ArmInterface(3).MapBackingMemory(target, size, block->data() + offset,
VMAPermission::ReadWriteExecute);
final_vma.type = VMAType::AllocatedMemoryBlock;
final_vma.permissions = VMAPermission::ReadWrite;
final_vma.permissions = perm;
final_vma.state = state;
final_vma.backing_block = std::move(block);
final_vma.offset = offset;
@@ -137,11 +134,6 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me
VirtualMemoryArea& final_vma = vma_handle->second;
ASSERT(final_vma.size == size);
system.ArmInterface(0).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
system.ArmInterface(1).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
system.ArmInterface(2).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
system.ArmInterface(3).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute);
final_vma.type = VMAType::BackingMemory;
final_vma.permissions = VMAPermission::ReadWrite;
final_vma.state = state;
@@ -152,22 +144,33 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me
}
ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
// Find the first Free VMA.
const VAddr base = GetASLRRegionBaseAddress();
const VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
if (vma.second.type != VMAType::Free)
return false;
return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size);
}
const VAddr vma_end = vma.second.base + vma.second.size;
return vma_end > base && vma_end >= base + size;
});
ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) const {
ASSERT(begin < end);
ASSERT(size <= end - begin);
if (vma_handle == vma_map.end()) {
const VMAHandle vma_handle =
std::find_if(vma_map.begin(), vma_map.end(), [begin, end, size](const auto& vma) {
if (vma.second.type != VMAType::Free) {
return false;
}
const VAddr vma_base = vma.second.base;
const VAddr vma_end = vma_base + vma.second.size;
const VAddr assumed_base = (begin < vma_base) ? vma_base : begin;
const VAddr used_range = assumed_base + size;
return vma_base <= assumed_base && assumed_base < used_range && used_range < end &&
used_range <= vma_end;
});
if (vma_handle == vma_map.cend()) {
// TODO(Subv): Find the correct error code here.
return ResultCode(-1);
}
const VAddr target = std::max(base, vma_handle->second.base);
const VAddr target = std::max(begin, vma_handle->second.base);
return MakeResult<VAddr>(target);
}
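
The predicate above accepts a free VMA when the candidate base (the later of begin and the VMA base) plus size still fits inside both the VMA and the [begin, end) window. A small self-contained illustration of that fit test, with made-up addresses:

// Standalone illustration of the fit test used by FindFreeRegion; values are arbitrary.
#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
    const std::uint64_t begin = 0x10000, end = 0x80000, size = 0x20000;
    const std::uint64_t vma_base = 0x08000, vma_size = 0x40000; // free VMA [0x08000, 0x48000)
    const std::uint64_t vma_end = vma_base + vma_size;
    const std::uint64_t assumed_base = std::max(begin, vma_base); // 0x10000
    const std::uint64_t used_range = assumed_base + size;         // 0x30000
    const bool fits = vma_base <= assumed_base && assumed_base < used_range &&
                      used_range < end && used_range <= vma_end;
    assert(fits); // [0x10000, 0x30000) lies inside both the VMA and the search window
    return 0;
}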
@@ -219,11 +222,6 @@ ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
ASSERT(FindVMA(target)->second.size >= size);
system.ArmInterface(0).UnmapMemory(target, size);
system.ArmInterface(1).UnmapMemory(target, size);
system.ArmInterface(2).UnmapMemory(target, size);
system.ArmInterface(3).UnmapMemory(target, size);
return RESULT_SUCCESS;
}
@@ -263,7 +261,7 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
if (heap_memory == nullptr) {
// Initialize heap
heap_memory = std::make_shared<std::vector<u8>>(size);
heap_memory = std::make_shared<PhysicalMemory>(size);
heap_end = heap_region_base + size;
} else {
UnmapRange(heap_region_base, GetCurrentHeapSize());
@@ -297,6 +295,166 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
return MakeResult<VAddr>(heap_region_base);
}
ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
const auto end_addr = target + size;
const auto last_addr = end_addr - 1;
VAddr cur_addr = target;
ResultCode result = RESULT_SUCCESS;
// Check how much memory we've already mapped.
const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size);
if (mapped_size_result.Failed()) {
return mapped_size_result.Code();
}
// If we've already mapped the desired amount, return early.
const std::size_t mapped_size = *mapped_size_result;
if (mapped_size == size) {
return RESULT_SUCCESS;
}
// Check that we can map the memory we want.
const auto res_limit = system.CurrentProcess()->GetResourceLimit();
const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) -
res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory);
if (physmem_remaining < (size - mapped_size)) {
return ERR_RESOURCE_LIMIT_EXCEEDED;
}
// Keep track of the memory regions we unmap.
std::vector<std::pair<u64, u64>> mapped_regions;
// Iterate, trying to map memory.
{
cur_addr = target;
auto iter = FindVMA(target);
ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
while (true) {
const auto& vma = iter->second;
const auto vma_start = vma.base;
const auto vma_end = vma_start + vma.size;
const auto vma_last = vma_end - 1;
// Map the memory block
const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
if (vma.state == MemoryState::Unmapped) {
const auto map_res =
MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size, 0), 0,
map_size, MemoryState::Heap, VMAPermission::ReadWrite);
result = map_res.Code();
if (result.IsError()) {
break;
}
mapped_regions.emplace_back(cur_addr, map_size);
}
// Break once we hit the end of the range.
if (last_addr <= vma_last) {
break;
}
// Advance to the next block.
cur_addr = vma_end;
iter = FindVMA(cur_addr);
ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
}
}
// If we failed, unmap memory.
if (result.IsError()) {
for (const auto [unmap_address, unmap_size] : mapped_regions) {
ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(),
"MapPhysicalMemory un-map on error");
}
return result;
}
// Update amount of mapped physical memory.
physical_memory_mapped += size - mapped_size;
return RESULT_SUCCESS;
}
ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
const auto end_addr = target + size;
const auto last_addr = end_addr - 1;
VAddr cur_addr = target;
ResultCode result = RESULT_SUCCESS;
// Check how much memory is currently mapped.
const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size);
if (mapped_size_result.Failed()) {
return mapped_size_result.Code();
}
// If we've already unmapped all the memory, return early.
const std::size_t mapped_size = *mapped_size_result;
if (mapped_size == 0) {
return RESULT_SUCCESS;
}
// Keep track of the memory regions we unmap.
std::vector<std::pair<u64, u64>> unmapped_regions;
// Try to unmap regions.
{
cur_addr = target;
auto iter = FindVMA(target);
ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
while (true) {
const auto& vma = iter->second;
const auto vma_start = vma.base;
const auto vma_end = vma_start + vma.size;
const auto vma_last = vma_end - 1;
// Unmap the memory block
const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
if (vma.state == MemoryState::Heap) {
result = UnmapRange(cur_addr, unmap_size);
if (result.IsError()) {
break;
}
unmapped_regions.emplace_back(cur_addr, unmap_size);
}
// Break once we hit the end of the range.
if (last_addr <= vma_last) {
break;
}
// Advance to the next block.
cur_addr = vma_end;
iter = FindVMA(cur_addr);
ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
}
}
// If we failed, re-map regions.
// TODO: Preserve memory contents?
if (result.IsError()) {
for (const auto [map_address, map_size] : unmapped_regions) {
const auto remap_res =
MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size, 0), 0,
map_size, MemoryState::Heap, VMAPermission::None);
ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error");
}
}
// Update mapped amount
physical_memory_mapped -= mapped_size;
return RESULT_SUCCESS;
}
ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
const auto src_check_result = CheckRangeState(
@@ -436,7 +594,7 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem
ASSERT_MSG(vma_offset + size <= vma->second.size,
"Shared memory exceeds bounds of mapped block");
const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
const std::shared_ptr<PhysicalMemory>& backing_block = vma->second.backing_block;
const std::size_t backing_block_offset = vma->second.offset + vma_offset;
CASCADE_RESULT(auto new_vma,
@@ -444,12 +602,12 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem
// Protect mirror with permissions from old region
Reprotect(new_vma, vma->second.permissions);
// Remove permissions from old region
Reprotect(vma, VMAPermission::None);
ReprotectRange(src_addr, size, VMAPermission::None);
return RESULT_SUCCESS;
}
void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
void VMManager::RefreshMemoryBlockMappings(const PhysicalMemory* block) {
// If this ever proves to have a noticeable performance impact, allow users of the function to
// specify a specific range of addresses to limit the scan to.
for (const auto& p : vma_map) {
@@ -577,14 +735,14 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
const VMAIter next_vma = std::next(iter);
if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
iter->second.size += next_vma->second.size;
MergeAdjacentVMA(iter->second, next_vma->second);
vma_map.erase(next_vma);
}
if (iter != vma_map.begin()) {
VMAIter prev_vma = std::prev(iter);
if (prev_vma->second.CanBeMergedWith(iter->second)) {
prev_vma->second.size += iter->second.size;
MergeAdjacentVMA(prev_vma->second, iter->second);
vma_map.erase(iter);
iter = prev_vma;
}
@@ -593,6 +751,38 @@ VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
return iter;
}
void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) {
ASSERT(left.CanBeMergedWith(right));
// Always merge allocated memory blocks, even when they don't share the same backing block.
if (left.type == VMAType::AllocatedMemoryBlock &&
(left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
// Check if we can save work.
if (left.offset == 0 && left.size == left.backing_block->size()) {
// Fast case: left is an entire backing block.
left.backing_block->insert(left.backing_block->end(),
right.backing_block->begin() + right.offset,
right.backing_block->begin() + right.offset + right.size);
} else {
// Slow case: make a new memory block for left and right.
auto new_memory = std::make_shared<PhysicalMemory>();
new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset,
left.backing_block->begin() + left.offset + left.size);
new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset,
right.backing_block->begin() + right.offset + right.size);
left.backing_block = new_memory;
left.offset = 0;
}
// Page table update is needed, because backing memory changed.
left.size += right.size;
UpdatePageTableForVMA(left);
} else {
// Just update the size.
left.size += right.size;
}
}
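
As a toy illustration of the two merge paths above, using std::vector<u8> as a stand-in for PhysicalMemory (values are arbitrary and only the fast path is exercised):

// Toy example: left covers its entire backing block, so right's bytes are
// appended directly, matching the fast path in MergeAdjacentVMA.
#include <cstdint>
#include <memory>
#include <vector>

int main() {
    using Block = std::vector<std::uint8_t>;
    auto left_block = std::make_shared<Block>(0x1000, 0xAA);  // left: offset 0, size 0x1000
    auto right_block = std::make_shared<Block>(0x2000, 0xBB); // right: offset 0x800, size 0x1000
    left_block->insert(left_block->end(), right_block->begin() + 0x800,
                       right_block->begin() + 0x800 + 0x1000);
    return 0;
}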
void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
switch (vma.type) {
case VMAType::Free:
@@ -614,9 +804,11 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type) {
u64 map_region_size = 0;
u64 heap_region_size = 0;
u64 new_map_region_size = 0;
u64 stack_region_size = 0;
u64 tls_io_region_size = 0;
u64 stack_and_tls_io_end = 0;
switch (type) {
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
@@ -632,6 +824,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
map_region_size = 0;
heap_region_size = 0x80000000;
}
stack_and_tls_io_end = 0x40000000;
break;
case FileSys::ProgramAddressSpaceType::Is36Bit:
address_space_width = 36;
@@ -641,6 +834,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
aslr_region_end = aslr_region_base + 0xFF8000000;
map_region_size = 0x180000000;
heap_region_size = 0x180000000;
stack_and_tls_io_end = 0x80000000;
break;
case FileSys::ProgramAddressSpaceType::Is39Bit:
address_space_width = 39;
@@ -650,7 +844,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
aslr_region_end = aslr_region_base + 0x7FF8000000;
map_region_size = 0x1000000000;
heap_region_size = 0x180000000;
new_map_region_size = 0x80000000;
stack_region_size = 0x80000000;
tls_io_region_size = 0x1000000000;
break;
default:
@@ -658,6 +852,8 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
return;
}
const u64 stack_and_tls_io_begin = aslr_region_base;
address_space_base = 0;
address_space_end = 1ULL << address_space_width;
@@ -668,15 +864,20 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
heap_region_end = heap_region_base + heap_region_size;
heap_end = heap_region_base;
new_map_region_base = heap_region_end;
new_map_region_end = new_map_region_base + new_map_region_size;
stack_region_base = heap_region_end;
stack_region_end = stack_region_base + stack_region_size;
tls_io_region_base = new_map_region_end;
tls_io_region_base = stack_region_end;
tls_io_region_end = tls_io_region_base + tls_io_region_size;
if (new_map_region_size == 0) {
new_map_region_base = address_space_base;
new_map_region_end = address_space_end;
if (stack_region_size == 0) {
stack_region_base = stack_and_tls_io_begin;
stack_region_end = stack_and_tls_io_end;
}
if (tls_io_region_size == 0) {
tls_io_region_base = stack_and_tls_io_begin;
tls_io_region_end = stack_and_tls_io_end;
}
}
@@ -756,6 +957,84 @@ VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, Memo
std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask));
}
ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
std::size_t size) const {
const VAddr end_addr = address + size;
const VAddr last_addr = end_addr - 1;
std::size_t mapped_size = 0;
VAddr cur_addr = address;
auto iter = FindVMA(cur_addr);
ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
while (true) {
const auto& vma = iter->second;
const VAddr vma_start = vma.base;
const VAddr vma_end = vma_start + vma.size;
const VAddr vma_last = vma_end - 1;
// Add size if relevant.
if (vma.state != MemoryState::Unmapped) {
mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
}
// Break once we hit the end of the range.
if (last_addr <= vma_last) {
break;
}
// Advance to the next block.
cur_addr = vma_end;
iter = std::next(iter);
ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
}
return MakeResult(mapped_size);
}
ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
std::size_t size) const {
const VAddr end_addr = address + size;
const VAddr last_addr = end_addr - 1;
std::size_t mapped_size = 0;
VAddr cur_addr = address;
auto iter = FindVMA(cur_addr);
ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
while (true) {
const auto& vma = iter->second;
const auto vma_start = vma.base;
const auto vma_end = vma_start + vma.size;
const auto vma_last = vma_end - 1;
const auto state = vma.state;
const auto attr = vma.attribute;
// Memory within region must be free or mapped heap.
if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) ||
(state == MemoryState::Unmapped))) {
return ERR_INVALID_ADDRESS_STATE;
}
// Add size if relevant.
if (state != MemoryState::Unmapped) {
mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
}
// Break once we hit the end of the range.
if (last_addr <= vma_last) {
break;
}
// Advance to the next block.
cur_addr = vma_end;
iter = std::next(iter);
ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
}
return MakeResult(mapped_size);
}
u64 VMManager::GetTotalPhysicalMemoryAvailable() const {
LOG_WARNING(Kernel, "(STUBBED) called");
return 0xF8000000;
@@ -868,21 +1147,21 @@ bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress());
}
VAddr VMManager::GetNewMapRegionBaseAddress() const {
return new_map_region_base;
VAddr VMManager::GetStackRegionBaseAddress() const {
return stack_region_base;
}
VAddr VMManager::GetNewMapRegionEndAddress() const {
return new_map_region_end;
VAddr VMManager::GetStackRegionEndAddress() const {
return stack_region_end;
}
u64 VMManager::GetNewMapRegionSize() const {
return new_map_region_end - new_map_region_base;
u64 VMManager::GetStackRegionSize() const {
return stack_region_end - stack_region_base;
}
bool VMManager::IsWithinNewMapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetNewMapRegionBaseAddress(),
GetNewMapRegionEndAddress());
bool VMManager::IsWithinStackRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetStackRegionBaseAddress(),
GetStackRegionEndAddress());
}
VAddr VMManager::GetTLSIORegionBaseAddress() const {

View File

@@ -11,6 +11,7 @@
#include "common/common_types.h"
#include "common/memory_hook.h"
#include "common/page_table.h"
#include "core/hle/kernel/physical_memory.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -290,7 +291,7 @@ struct VirtualMemoryArea {
// Settings for type = AllocatedMemoryBlock
/// Memory block backing this VMA.
std::shared_ptr<std::vector<u8>> backing_block = nullptr;
std::shared_ptr<PhysicalMemory> backing_block = nullptr;
/// Offset into the backing_memory the mapping starts from.
std::size_t offset = 0;
@@ -348,8 +349,9 @@ public:
* @param size Size of the mapping.
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
std::size_t offset, u64 size, MemoryState state);
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<PhysicalMemory> block,
std::size_t offset, u64 size, MemoryState state,
VMAPermission perm = VMAPermission::ReadWrite);
/**
* Maps an unmanaged host memory pointer at a given address.
@@ -362,13 +364,38 @@ public:
ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state);
/**
* Finds the first free address that can hold a region of the desired size.
* Finds the first free memory region of the given size within
* the user-addressable ASLR memory region.
*
* @param size Size of the desired region.
* @return The found free address.
* @param size The size of the desired region in bytes.
*
* @returns If successful, the base address of the free region with
* the given size.
*/
ResultVal<VAddr> FindFreeRegion(u64 size) const;
/**
* Finds the first free address range that can hold a region of the desired size
*
* @param begin The starting address of the range.
* This is treated as an inclusive beginning address.
*
* @param end The ending address of the range.
* This is treated as an exclusive ending address.
*
* @param size The size of the free region to attempt to locate,
* in bytes.
*
* @returns If successful, the base address of the free region with
* the given size.
*
* @returns If unsuccessful, a result containing an error code.
*
* @pre The starting address must be less than the ending address.
* @pre The size must not exceed the address range itself.
*/
ResultVal<VAddr> FindFreeRegion(VAddr begin, VAddr end, u64 size) const;
/**
* Maps a memory-mapped IO region at a given address.
*
@@ -425,6 +452,34 @@ public:
///
ResultVal<VAddr> SetHeapSize(u64 size);
/// Maps memory at a given address.
///
/// @param addr The virtual address to map memory at.
/// @param size The amount of memory to map.
///
/// @note The destination address must lie within the Map region.
///
/// @note This function requires that SystemResourceSize be non-zero,
/// however, this is just because if it were not then the
/// resulting page tables could be exploited on hardware by
/// a malicious program. SystemResource usage does not need
/// to be explicitly checked or updated here.
ResultCode MapPhysicalMemory(VAddr target, u64 size);
/// Unmaps memory at a given address.
///
/// @param addr The virtual address to unmap memory at.
/// @param size The amount of memory to unmap.
///
/// @note The destination address must lie within the Map region.
///
/// @note This function requires that SystemResourceSize be non-zero,
/// however, this is just because if it were not then the
/// resulting page tables could be exploited on hardware by
/// a malicious program. SystemResource usage does not need
/// to be explicitly checked or updated here.
ResultCode UnmapPhysicalMemory(VAddr target, u64 size);
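
A minimal usage sketch of this pair, mirroring how the SVC handlers drive it (the wrapper function is hypothetical; it assumes a valid Core::System and a process with a non-zero SystemResourceSize):

// Sketch only, not part of the change.
static ResultCode MapAndUnmapExample(Core::System& system) {
    auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
    const VAddr addr = vm_manager.GetMapRegionBaseAddress(); // must lie within the Map region
    const u64 size = 0x200000;                               // 4KB-aligned

    const ResultCode map_result = vm_manager.MapPhysicalMemory(addr, size);
    if (map_result.IsError()) {
        return map_result;
    }
    // ... use the newly mapped, heap-backed range ...
    return vm_manager.UnmapPhysicalMemory(addr, size);
}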
/// Maps a region of memory as code memory.
///
/// @param dst_address The base address of the region to create the aliasing memory region.
@@ -493,7 +548,7 @@ public:
* Scans all VMAs and updates the page table range of any that use the given vector as backing
* memory. This should be called after any operation that causes reallocation of the vector.
*/
void RefreshMemoryBlockMappings(const std::vector<u8>* block);
void RefreshMemoryBlockMappings(const PhysicalMemory* block);
/// Dumps the address space layout to the log, for debugging
void LogLayout() const;
@@ -571,17 +626,17 @@ public:
/// Determines whether or not the specified range is within the map region.
bool IsWithinMapRegion(VAddr address, u64 size) const;
/// Gets the base address of the new map region.
VAddr GetNewMapRegionBaseAddress() const;
/// Gets the base address of the stack region.
VAddr GetStackRegionBaseAddress() const;
/// Gets the end address of the new map region.
VAddr GetNewMapRegionEndAddress() const;
/// Gets the end address of the stack region.
VAddr GetStackRegionEndAddress() const;
/// Gets the total size of the new map region in bytes.
u64 GetNewMapRegionSize() const;
/// Gets the total size of the stack region in bytes.
u64 GetStackRegionSize() const;
/// Determines whether or not the given address range is within the new map region
bool IsWithinNewMapRegion(VAddr address, u64 size) const;
/// Determines whether or not the given address range is within the stack region
bool IsWithinStackRegion(VAddr address, u64 size) const;
/// Gets the base address of the TLS IO region.
VAddr GetTLSIORegionBaseAddress() const;
@@ -632,6 +687,11 @@ private:
*/
VMAIter MergeAdjacent(VMAIter vma);
/**
* Merges two adjacent VMAs.
*/
void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right);
/// Updates the pages corresponding to this VMA so they match the VMA's attributes.
void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
@@ -676,6 +736,13 @@ private:
MemoryAttribute attribute_mask, MemoryAttribute attribute,
MemoryAttribute ignore_mask) const;
/// Gets the amount of memory currently mapped (state != Unmapped) in a range.
ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const;
/// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range.
ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
std::size_t size) const;
/**
* A map covering the entirety of the managed address space, keyed by the `base` field of each
* VMA. It must always be modified by splitting or merging VMAs, so that the invariant
@@ -701,8 +768,8 @@ private:
VAddr map_region_base = 0;
VAddr map_region_end = 0;
VAddr new_map_region_base = 0;
VAddr new_map_region_end = 0;
VAddr stack_region_base = 0;
VAddr stack_region_end = 0;
VAddr tls_io_region_base = 0;
VAddr tls_io_region_end = 0;
@@ -711,12 +778,17 @@ private:
// the entire virtual address space extents that bound the allocations, including any holes.
// This makes deallocation and reallocation of holes fast and keeps process memory contiguous
// in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
std::shared_ptr<std::vector<u8>> heap_memory;
std::shared_ptr<PhysicalMemory> heap_memory;
// The end of the currently allocated heap. This is not an inclusive
// end of the range. This is essentially 'base_address + current_size'.
VAddr heap_end = 0;
// The current amount of memory mapped via MapPhysicalMemory.
// This is used here (and in Nintendo's kernel) only for debugging, and does not impact
// any behavior.
u64 physical_memory_mapped = 0;
Core::System& system;
};
} // namespace Kernel

View File

@@ -15,13 +15,18 @@
#include "core/file_sys/control_metadata.h"
#include "core/file_sys/patch_manager.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/acc/acc.h"
#include "core/hle/service/acc/acc_aa.h"
#include "core/hle/service/acc/acc_su.h"
#include "core/hle/service/acc/acc_u0.h"
#include "core/hle/service/acc/acc_u1.h"
#include "core/hle/service/acc/errors.h"
#include "core/hle/service/acc/profile_manager.h"
#include "core/hle/service/glue/arp.h"
#include "core/hle/service/glue/manager.h"
#include "core/hle/service/sm/sm.h"
#include "core/loader/loader.h"
namespace Service::Account {
@@ -217,10 +222,72 @@ void Module::Interface::IsUserRegistrationRequestPermitted(Kernel::HLERequestCon
rb.Push(profile_manager->CanSystemRegisterUser());
}
void Module::Interface::InitializeApplicationInfoOld(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_ACC, "(STUBBED) called");
void Module::Interface::InitializeApplicationInfo(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
auto pid = rp.Pop<u64>();
LOG_DEBUG(Service_ACC, "called, process_id={}", pid);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
rb.Push(InitializeApplicationInfoBase(pid));
}
void Module::Interface::InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
auto pid = rp.Pop<u64>();
LOG_WARNING(Service_ACC, "(Partial implementation) called, process_id={}", pid);
// TODO(ogniK): We still need to check whether the user actually owns the title. For now,
// we assume the user owns the title. InitializeApplicationInfoBase SHOULD be called
// first, then we do extra checks if the game is a digital copy.
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(InitializeApplicationInfoBase(pid));
}
ResultCode Module::Interface::InitializeApplicationInfoBase(u64 process_id) {
if (application_info) {
LOG_ERROR(Service_ACC, "Application already initialized");
return ERR_ACCOUNTINFO_ALREADY_INITIALIZED;
}
const auto& list = system.Kernel().GetProcessList();
const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) {
return process->GetProcessID() == process_id;
});
if (iter == list.end()) {
LOG_ERROR(Service_ACC, "Failed to find process ID");
application_info.application_type = ApplicationType::Unknown;
return ERR_ACCOUNTINFO_BAD_APPLICATION;
}
const auto launch_property = system.GetARPManager().GetLaunchProperty((*iter)->GetTitleID());
if (launch_property.Failed()) {
LOG_ERROR(Service_ACC, "Failed to get launch property");
return ERR_ACCOUNTINFO_BAD_APPLICATION;
}
switch (launch_property->base_game_storage_id) {
case FileSys::StorageId::GameCard:
application_info.application_type = ApplicationType::GameCard;
break;
case FileSys::StorageId::Host:
case FileSys::StorageId::NandUser:
case FileSys::StorageId::SdCard:
application_info.application_type = ApplicationType::Digital;
break;
default:
LOG_ERROR(Service_ACC, "Invalid game storage ID");
return ERR_ACCOUNTINFO_BAD_APPLICATION;
}
LOG_WARNING(Service_ACC, "ApplicationInfo init required");
// TODO(ogniK): Actual initialization here
return RESULT_SUCCESS;
}
void Module::Interface::GetBaasAccountManagerForApplication(Kernel::HLERequestContext& ctx) {

View File

@@ -4,6 +4,7 @@
#pragma once
#include "core/hle/service/glue/manager.h"
#include "core/hle/service/service.h"
namespace Service::Account {
@@ -25,12 +26,33 @@ public:
void ListOpenUsers(Kernel::HLERequestContext& ctx);
void GetLastOpenedUser(Kernel::HLERequestContext& ctx);
void GetProfile(Kernel::HLERequestContext& ctx);
void InitializeApplicationInfoOld(Kernel::HLERequestContext& ctx);
void InitializeApplicationInfo(Kernel::HLERequestContext& ctx);
void InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx);
void GetBaasAccountManagerForApplication(Kernel::HLERequestContext& ctx);
void IsUserRegistrationRequestPermitted(Kernel::HLERequestContext& ctx);
void TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx);
void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx);
private:
ResultCode InitializeApplicationInfoBase(u64 process_id);
enum class ApplicationType : u32_le {
GameCard = 0,
Digital = 1,
Unknown = 3,
};
struct ApplicationInfo {
Service::Glue::ApplicationLaunchProperty launch_property;
ApplicationType application_type;
constexpr explicit operator bool() const {
return launch_property.title_id != 0x0;
}
};
ApplicationInfo application_info{};
protected:
std::shared_ptr<Module> module;
std::shared_ptr<ProfileManager> profile_manager;

View File

@@ -22,7 +22,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
{51, &ACC_U0::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
{60, nullptr, "ListOpenContextStoredUsers"},
{99, nullptr, "DebugActivateOpenContextRetention"},
{100, &ACC_U0::InitializeApplicationInfoOld, "InitializeApplicationInfoOld"},
{100, &ACC_U0::InitializeApplicationInfo, "InitializeApplicationInfo"},
{101, &ACC_U0::GetBaasAccountManagerForApplication, "GetBaasAccountManagerForApplication"},
{102, nullptr, "AuthenticateApplicationAsync"},
{103, nullptr, "CheckNetworkServiceAvailabilityAsync"},
@@ -31,7 +31,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
{120, nullptr, "CreateGuestLoginRequest"},
{130, nullptr, "LoadOpenContext"},
{131, nullptr, "ListOpenContextStoredUsers"},
{140, nullptr, "InitializeApplicationInfo"},
{140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"},
{141, nullptr, "ListQualifiedUsers"},
{150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"},
};

View File

@@ -0,0 +1,14 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/result.h"
namespace Service::Account {
constexpr ResultCode ERR_ACCOUNTINFO_BAD_APPLICATION{ErrorModule::Account, 22};
constexpr ResultCode ERR_ACCOUNTINFO_ALREADY_INITIALIZED{ErrorModule::Account, 41};
} // namespace Service::Account

View File

@@ -29,7 +29,8 @@
#include "core/hle/service/am/omm.h"
#include "core/hle/service/am/spsm.h"
#include "core/hle/service/am/tcap.h"
#include "core/hle/service/apm/apm.h"
#include "core/hle/service/apm/controller.h"
#include "core/hle/service/apm/interface.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/ns/ns.h"
#include "core/hle/service/nvflinger/nvflinger.h"
@@ -265,12 +266,12 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger
{65, nullptr, "ReportUserIsActive"},
{66, nullptr, "GetCurrentIlluminance"},
{67, nullptr, "IsIlluminanceAvailable"},
{68, nullptr, "SetAutoSleepDisabled"},
{69, nullptr, "IsAutoSleepDisabled"},
{68, &ISelfController::SetAutoSleepDisabled, "SetAutoSleepDisabled"},
{69, &ISelfController::IsAutoSleepDisabled, "IsAutoSleepDisabled"},
{70, nullptr, "ReportMultimediaError"},
{71, nullptr, "GetCurrentIlluminanceEx"},
{80, nullptr, "SetWirelessPriorityMode"},
{90, nullptr, "GetAccumulatedSuspendedTickValue"},
{90, &ISelfController::GetAccumulatedSuspendedTickValue, "GetAccumulatedSuspendedTickValue"},
{91, &ISelfController::GetAccumulatedSuspendedTickChangedEvent, "GetAccumulatedSuspendedTickChangedEvent"},
{100, nullptr, "SetAlbumImageTakenNotificationEnabled"},
{1000, nullptr, "GetDebugStorageChannel"},
@@ -283,10 +284,14 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger
launchable_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual,
"ISelfController:LaunchableEvent");
// TODO(ogniK): Figure out where, when and why this event gets signalled
// This event is created by AM on the first time GetAccumulatedSuspendedTickChangedEvent() is
// called. Yuzu can just create it unconditionally, since it doesn't need to support multiple
// ISelfControllers. The event is signaled on creation, and on transition from suspended -> not
// suspended if the event has previously been created by a call to
// GetAccumulatedSuspendedTickChangedEvent.
accumulated_suspended_tick_changed_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, "ISelfController:AccumulatedSuspendedTickChangedEvent");
accumulated_suspended_tick_changed_event.writable->Signal(); // Is signalled on creation
accumulated_suspended_tick_changed_event.writable->Signal();
}
ISelfController::~ISelfController() = default;
@@ -449,11 +454,47 @@ void ISelfController::GetIdleTimeDetectionExtension(Kernel::HLERequestContext& c
rb.Push<u32>(idle_time_detection_extension);
}
void ISelfController::SetAutoSleepDisabled(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
is_auto_sleep_disabled = rp.Pop<bool>();
// On the system itself, if the previous state of is_auto_sleep_disabled
// differed from the current value passed in, it would signal the internal
// window manager to update (and also increment some statistics like update counts)
//
// It'd also indicate this change to an idle handling context.
//
// However, given we're emulating this behavior, most of this can be ignored
// and it's sufficient to simply set the member variable for querying via
// IsAutoSleepDisabled().
LOG_DEBUG(Service_AM, "called. is_auto_sleep_disabled={}", is_auto_sleep_disabled);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void ISelfController::IsAutoSleepDisabled(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called.");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(is_auto_sleep_disabled);
}
void ISelfController::GetAccumulatedSuspendedTickValue(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called.");
// This command returns the total number of system ticks since ISelfController creation
// where the game was suspended. Since Yuzu doesn't implement game suspension, this command
// can just always return 0 ticks.
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.Push<u64>(0);
}
void ISelfController::GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx) {
// The implementation of this function is fine as is, the reason we're labelling it as stubbed
// is because we're currently unsure when and where accumulated_suspended_tick_changed_event is
// actually signalled for the time being.
LOG_WARNING(Service_AM, "(STUBBED) called");
LOG_DEBUG(Service_AM, "called.");
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
@@ -508,8 +549,9 @@ void AppletMessageQueue::OperationModeChanged() {
on_operation_mode_changed.writable->Signal();
}
ICommonStateGetter::ICommonStateGetter(std::shared_ptr<AppletMessageQueue> msg_queue)
: ServiceFramework("ICommonStateGetter"), msg_queue(std::move(msg_queue)) {
ICommonStateGetter::ICommonStateGetter(Core::System& system,
std::shared_ptr<AppletMessageQueue> msg_queue)
: ServiceFramework("ICommonStateGetter"), system(system), msg_queue(std::move(msg_queue)) {
// clang-format off
static const FunctionInfo functions[] = {
{0, &ICommonStateGetter::GetEventHandle, "GetEventHandle"},
@@ -542,7 +584,7 @@ ICommonStateGetter::ICommonStateGetter(std::shared_ptr<AppletMessageQueue> msg_q
{63, nullptr, "GetHdcpAuthenticationStateChangeEvent"},
{64, nullptr, "SetTvPowerStateMatchingMode"},
{65, nullptr, "GetApplicationIdByContentActionName"},
{66, nullptr, "SetCpuBoostMode"},
{66, &ICommonStateGetter::SetCpuBoostMode, "SetCpuBoostMode"},
{80, nullptr, "PerformSystemButtonPressingIfInFocus"},
{90, nullptr, "SetPerformanceConfigurationChangedNotification"},
{91, nullptr, "GetCurrentPerformanceConfiguration"},
@@ -623,6 +665,16 @@ void ICommonStateGetter::GetDefaultDisplayResolution(Kernel::HLERequestContext&
}
}
void ICommonStateGetter::SetCpuBoostMode(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_AM, "called, forwarding to APM:SYS");
const auto& sm = system.ServiceManager();
const auto apm_sys = sm.GetService<APM::APM_Sys>("apm:sys");
ASSERT(apm_sys != nullptr);
apm_sys->SetCpuBoostMode(ctx);
}
IStorage::IStorage(std::vector<u8> buffer)
: ServiceFramework("IStorage"), buffer(std::move(buffer)) {
// clang-format off
@@ -651,13 +703,11 @@ void ICommonStateGetter::GetOperationMode(Kernel::HLERequestContext& ctx) {
}
void ICommonStateGetter::GetPerformanceMode(Kernel::HLERequestContext& ctx) {
const bool use_docked_mode{Settings::values.use_docked_mode};
LOG_DEBUG(Service_AM, "called, use_docked_mode={}", use_docked_mode);
LOG_DEBUG(Service_AM, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(static_cast<u32>(use_docked_mode ? APM::PerformanceMode::Docked
: APM::PerformanceMode::Handheld));
rb.PushEnum(system.GetAPMController().GetCurrentPerformanceMode());
}
class ILibraryAppletAccessor final : public ServiceFramework<ILibraryAppletAccessor> {

View File

@@ -133,6 +133,9 @@ private:
void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx);
void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
void SetAutoSleepDisabled(Kernel::HLERequestContext& ctx);
void IsAutoSleepDisabled(Kernel::HLERequestContext& ctx);
void GetAccumulatedSuspendedTickValue(Kernel::HLERequestContext& ctx);
void GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx);
std::shared_ptr<NVFlinger::NVFlinger> nvflinger;
@@ -141,11 +144,13 @@ private:
u32 idle_time_detection_extension = 0;
u64 num_fatal_sections_entered = 0;
bool is_auto_sleep_disabled = false;
};
class ICommonStateGetter final : public ServiceFramework<ICommonStateGetter> {
public:
explicit ICommonStateGetter(std::shared_ptr<AppletMessageQueue> msg_queue);
explicit ICommonStateGetter(Core::System& system,
std::shared_ptr<AppletMessageQueue> msg_queue);
~ICommonStateGetter() override;
private:
@@ -167,7 +172,9 @@ private:
void GetPerformanceMode(Kernel::HLERequestContext& ctx);
void GetBootMode(Kernel::HLERequestContext& ctx);
void GetDefaultDisplayResolution(Kernel::HLERequestContext& ctx);
void SetCpuBoostMode(Kernel::HLERequestContext& ctx);
Core::System& system;
std::shared_ptr<AppletMessageQueue> msg_queue;
};

View File

@@ -42,7 +42,7 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ICommonStateGetter>(msg_queue);
rb.PushIpcInterface<ICommonStateGetter>(system, msg_queue);
}
void GetSelfController(Kernel::HLERequestContext& ctx) {
@@ -146,7 +146,7 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ICommonStateGetter>(msg_queue);
rb.PushIpcInterface<ICommonStateGetter>(system, msg_queue);
}
void GetSelfController(Kernel::HLERequestContext& ctx) {

View File

@@ -80,7 +80,7 @@ private:
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ICommonStateGetter>(msg_queue);
rb.PushIpcInterface<ICommonStateGetter>(system, msg_queue);
}
void GetLibraryAppletCreator(Kernel::HLERequestContext& ctx) {

View File

@@ -459,10 +459,10 @@ void WebBrowser::InitializeOffline() {
case OfflineWebSource::OfflineHtmlPage:
// While there is an AppID TLV field, in official SW this is always ignored.
title_id = 0;
type = FileSys::ContentRecordType::Manual;
type = FileSys::ContentRecordType::HtmlDocument;
break;
case OfflineWebSource::ApplicationLegalInformation:
type = FileSys::ContentRecordType::Legal;
type = FileSys::ContentRecordType::LegalInformation;
break;
case OfflineWebSource::SystemDataPage:
type = FileSys::ContentRecordType::Data;

View File

@@ -2,7 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/service/apm/apm.h"
#include "core/hle/service/apm/interface.h"
@@ -12,11 +11,15 @@ namespace Service::APM {
Module::Module() = default;
Module::~Module() = default;
void InstallInterfaces(SM::ServiceManager& service_manager) {
void InstallInterfaces(Core::System& system) {
auto module_ = std::make_shared<Module>();
std::make_shared<APM>(module_, "apm")->InstallAsService(service_manager);
std::make_shared<APM>(module_, "apm:p")->InstallAsService(service_manager);
std::make_shared<APM_Sys>()->InstallAsService(service_manager);
std::make_shared<APM>(module_, system.GetAPMController(), "apm")
->InstallAsService(system.ServiceManager());
std::make_shared<APM>(module_, system.GetAPMController(), "apm:p")
->InstallAsService(system.ServiceManager());
std::make_shared<APM>(module_, system.GetAPMController(), "apm:am")
->InstallAsService(system.ServiceManager());
std::make_shared<APM_Sys>(system.GetAPMController())->InstallAsService(system.ServiceManager());
}
} // namespace Service::APM

View File

@@ -8,11 +8,6 @@
namespace Service::APM {
enum class PerformanceMode : u8 {
Handheld = 0,
Docked = 1,
};
class Module final {
public:
Module();
@@ -20,6 +15,6 @@ public:
};
/// Registers all AM services with the specified service manager.
void InstallInterfaces(SM::ServiceManager& service_manager);
void InstallInterfaces(Core::System& system);
} // namespace Service::APM

View File

@@ -0,0 +1,68 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "common/logging/log.h"
#include "core/core_timing.h"
#include "core/hle/service/apm/controller.h"
#include "core/settings.h"
namespace Service::APM {
constexpr PerformanceConfiguration DEFAULT_PERFORMANCE_CONFIGURATION =
PerformanceConfiguration::Config7;
Controller::Controller(Core::Timing::CoreTiming& core_timing)
: core_timing(core_timing), configs{
{PerformanceMode::Handheld, DEFAULT_PERFORMANCE_CONFIGURATION},
{PerformanceMode::Docked, DEFAULT_PERFORMANCE_CONFIGURATION},
} {}
Controller::~Controller() = default;
void Controller::SetPerformanceConfiguration(PerformanceMode mode,
PerformanceConfiguration config) {
static const std::map<PerformanceConfiguration, u32> PCONFIG_TO_SPEED_MAP{
{PerformanceConfiguration::Config1, 1020}, {PerformanceConfiguration::Config2, 1020},
{PerformanceConfiguration::Config3, 1224}, {PerformanceConfiguration::Config4, 1020},
{PerformanceConfiguration::Config5, 1020}, {PerformanceConfiguration::Config6, 1224},
{PerformanceConfiguration::Config7, 1020}, {PerformanceConfiguration::Config8, 1020},
{PerformanceConfiguration::Config9, 1020}, {PerformanceConfiguration::Config10, 1020},
{PerformanceConfiguration::Config11, 1020}, {PerformanceConfiguration::Config12, 1020},
{PerformanceConfiguration::Config13, 1785}, {PerformanceConfiguration::Config14, 1785},
{PerformanceConfiguration::Config15, 1020}, {PerformanceConfiguration::Config16, 1020},
};
SetClockSpeed(PCONFIG_TO_SPEED_MAP.find(config)->second);
configs.insert_or_assign(mode, config);
}
void Controller::SetFromCpuBoostMode(CpuBoostMode mode) {
constexpr std::array<PerformanceConfiguration, 3> BOOST_MODE_TO_CONFIG_MAP{{
PerformanceConfiguration::Config7,
PerformanceConfiguration::Config13,
PerformanceConfiguration::Config15,
}};
SetPerformanceConfiguration(PerformanceMode::Docked,
BOOST_MODE_TO_CONFIG_MAP.at(static_cast<u32>(mode)));
}
PerformanceMode Controller::GetCurrentPerformanceMode() {
return Settings::values.use_docked_mode ? PerformanceMode::Docked : PerformanceMode::Handheld;
}
PerformanceConfiguration Controller::GetCurrentPerformanceConfiguration(PerformanceMode mode) {
if (configs.find(mode) == configs.end()) {
configs.insert_or_assign(mode, DEFAULT_PERFORMANCE_CONFIGURATION);
}
return configs[mode];
}
void Controller::SetClockSpeed(u32 mhz) {
LOG_INFO(Service_APM, "called, mhz={:08X}", mhz);
// TODO(DarkLordZach): Actually signal core_timing to change clock speed.
}
} // namespace Service::APM

View File

@@ -0,0 +1,70 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <map>
#include "common/common_types.h"
namespace Core::Timing {
class CoreTiming;
}
namespace Service::APM {
enum class PerformanceConfiguration : u32 {
Config1 = 0x00010000,
Config2 = 0x00010001,
Config3 = 0x00010002,
Config4 = 0x00020000,
Config5 = 0x00020001,
Config6 = 0x00020002,
Config7 = 0x00020003,
Config8 = 0x00020004,
Config9 = 0x00020005,
Config10 = 0x00020006,
Config11 = 0x92220007,
Config12 = 0x92220008,
Config13 = 0x92220009,
Config14 = 0x9222000A,
Config15 = 0x9222000B,
Config16 = 0x9222000C,
};
enum class CpuBoostMode : u32 {
Disabled = 0,
Full = 1, // CPU + GPU -> Config 13, 14, 15, or 16
Partial = 2, // GPU Only -> Config 15 or 16
};
enum class PerformanceMode : u8 {
Handheld = 0,
Docked = 1,
};
// Class to manage the state and change of the emulated system performance.
// Specifically, this deals with PerformanceMode, which corresponds to the system being docked or
// undocked, and PerformanceConfig which specifies the exact CPU, GPU, and Memory clocks to operate
// at. Additionally, this manages 'Boost Mode', which allows games to temporarily overclock the
// system during times of high load -- this simply maps to different PerformanceConfigs to use.
class Controller {
public:
Controller(Core::Timing::CoreTiming& core_timing);
~Controller();
void SetPerformanceConfiguration(PerformanceMode mode, PerformanceConfiguration config);
void SetFromCpuBoostMode(CpuBoostMode mode);
PerformanceMode GetCurrentPerformanceMode();
PerformanceConfiguration GetCurrentPerformanceConfiguration(PerformanceMode mode);
private:
void SetClockSpeed(u32 mhz);
std::map<PerformanceMode, PerformanceConfiguration> configs;
Core::Timing::CoreTiming& core_timing;
};
} // namespace Service::APM
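
A brief usage sketch of the Controller declared above (the core_timing reference is assumed to come from the owning Core::System; the boost-mode mapping follows controller.cpp):

// Sketch only: drives the Controller the same way apm:sys and am do.
Service::APM::Controller controller{core_timing};

// SetCpuBoostMode(Full) selects Config13 for the docked mode.
controller.SetFromCpuBoostMode(Service::APM::CpuBoostMode::Full);

const auto mode = controller.GetCurrentPerformanceMode();
const auto config = controller.GetCurrentPerformanceConfiguration(mode);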

View File

@@ -5,43 +5,32 @@
#include "common/logging/log.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/service/apm/apm.h"
#include "core/hle/service/apm/controller.h"
#include "core/hle/service/apm/interface.h"
namespace Service::APM {
class ISession final : public ServiceFramework<ISession> {
public:
ISession() : ServiceFramework("ISession") {
ISession(Controller& controller) : ServiceFramework("ISession"), controller(controller) {
static const FunctionInfo functions[] = {
{0, &ISession::SetPerformanceConfiguration, "SetPerformanceConfiguration"},
{1, &ISession::GetPerformanceConfiguration, "GetPerformanceConfiguration"},
{2, nullptr, "SetCpuOverclockEnabled"},
};
RegisterHandlers(functions);
}
private:
enum class PerformanceConfiguration : u32 {
Config1 = 0x00010000,
Config2 = 0x00010001,
Config3 = 0x00010002,
Config4 = 0x00020000,
Config5 = 0x00020001,
Config6 = 0x00020002,
Config7 = 0x00020003,
Config8 = 0x00020004,
Config9 = 0x00020005,
Config10 = 0x00020006,
Config11 = 0x92220007,
Config12 = 0x92220008,
};
void SetPerformanceConfiguration(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
auto mode = static_cast<PerformanceMode>(rp.Pop<u32>());
u32 config = rp.Pop<u32>();
LOG_WARNING(Service_APM, "(STUBBED) called mode={} config={}", static_cast<u32>(mode),
config);
const auto mode = rp.PopEnum<PerformanceMode>();
const auto config = rp.PopEnum<PerformanceConfiguration>();
LOG_DEBUG(Service_APM, "called mode={} config={}", static_cast<u32>(mode),
static_cast<u32>(config));
controller.SetPerformanceConfiguration(mode, config);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
@@ -50,20 +39,23 @@ private:
void GetPerformanceConfiguration(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
auto mode = static_cast<PerformanceMode>(rp.Pop<u32>());
LOG_WARNING(Service_APM, "(STUBBED) called mode={}", static_cast<u32>(mode));
const auto mode = rp.PopEnum<PerformanceMode>();
LOG_DEBUG(Service_APM, "called mode={}", static_cast<u32>(mode));
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(static_cast<u32>(PerformanceConfiguration::Config1));
rb.PushEnum(controller.GetCurrentPerformanceConfiguration(mode));
}
Controller& controller;
};
APM::APM(std::shared_ptr<Module> apm, const char* name)
: ServiceFramework(name), apm(std::move(apm)) {
APM::APM(std::shared_ptr<Module> apm, Controller& controller, const char* name)
: ServiceFramework(name), apm(std::move(apm)), controller(controller) {
static const FunctionInfo functions[] = {
{0, &APM::OpenSession, "OpenSession"},
{1, nullptr, "GetPerformanceMode"},
{1, &APM::GetPerformanceMode, "GetPerformanceMode"},
{6, nullptr, "IsCpuOverclockEnabled"},
};
RegisterHandlers(functions);
}
@@ -75,10 +67,17 @@ void APM::OpenSession(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ISession>();
rb.PushIpcInterface<ISession>(controller);
}
APM_Sys::APM_Sys() : ServiceFramework{"apm:sys"} {
void APM::GetPerformanceMode(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_APM, "called");
IPC::ResponseBuilder rb{ctx, 2};
rb.PushEnum(controller.GetCurrentPerformanceMode());
}
APM_Sys::APM_Sys(Controller& controller) : ServiceFramework{"apm:sys"}, controller(controller) {
// clang-format off
static const FunctionInfo functions[] = {
{0, nullptr, "RequestPerformanceMode"},
@@ -87,8 +86,8 @@ APM_Sys::APM_Sys() : ServiceFramework{"apm:sys"} {
{3, nullptr, "GetLastThrottlingState"},
{4, nullptr, "ClearLastThrottlingState"},
{5, nullptr, "LoadAndApplySettings"},
{6, nullptr, "SetCpuBoostMode"},
{7, nullptr, "GetCurrentPerformanceConfiguration"},
{6, &APM_Sys::SetCpuBoostMode, "SetCpuBoostMode"},
{7, &APM_Sys::GetCurrentPerformanceConfiguration, "GetCurrentPerformanceConfiguration"},
};
// clang-format on
@@ -102,7 +101,28 @@ void APM_Sys::GetPerformanceEvent(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<ISession>();
rb.PushIpcInterface<ISession>(controller);
}
void APM_Sys::SetCpuBoostMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto mode = rp.PopEnum<CpuBoostMode>();
LOG_DEBUG(Service_APM, "called, mode={:08X}", static_cast<u32>(mode));
controller.SetFromCpuBoostMode(mode);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void APM_Sys::GetCurrentPerformanceConfiguration(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_APM, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.PushEnum(
controller.GetCurrentPerformanceConfiguration(controller.GetCurrentPerformanceMode()));
}
} // namespace Service::APM

View File

@@ -8,24 +8,34 @@
namespace Service::APM {
class Controller;
class Module;
class APM final : public ServiceFramework<APM> {
public:
explicit APM(std::shared_ptr<Module> apm, const char* name);
explicit APM(std::shared_ptr<Module> apm, Controller& controller, const char* name);
~APM() override;
private:
void OpenSession(Kernel::HLERequestContext& ctx);
void GetPerformanceMode(Kernel::HLERequestContext& ctx);
std::shared_ptr<Module> apm;
Controller& controller;
};
class APM_Sys final : public ServiceFramework<APM_Sys> {
public:
explicit APM_Sys();
explicit APM_Sys(Controller& controller);
~APM_Sys() override;
void SetCpuBoostMode(Kernel::HLERequestContext& ctx);
private:
void GetPerformanceEvent(Kernel::HLERequestContext& ctx);
void GetCurrentPerformanceConfiguration(Kernel::HLERequestContext& ctx);
Controller& controller;
};
} // namespace Service::APM

View File

@@ -25,7 +25,8 @@ namespace Service::Audio {
class IAudioRenderer final : public ServiceFramework<IAudioRenderer> {
public:
explicit IAudioRenderer(AudioCore::AudioRendererParameter audren_params)
explicit IAudioRenderer(AudioCore::AudioRendererParameter audren_params,
const std::size_t instance_number)
: ServiceFramework("IAudioRenderer") {
// clang-format off
static const FunctionInfo functions[] = {
@@ -48,8 +49,8 @@ public:
auto& system = Core::System::GetInstance();
system_event = Kernel::WritableEvent::CreateEventPair(
system.Kernel(), Kernel::ResetType::Manual, "IAudioRenderer:SystemEvent");
renderer = std::make_unique<AudioCore::AudioRenderer>(system.CoreTiming(), audren_params,
system_event.writable);
renderer = std::make_unique<AudioCore::AudioRenderer>(
system.CoreTiming(), audren_params, system_event.writable, instance_number);
}
private:
@@ -167,13 +168,12 @@ public:
{3, &IAudioDevice::GetActiveAudioDeviceName, "GetActiveAudioDeviceName"},
{4, &IAudioDevice::QueryAudioDeviceSystemEvent, "QueryAudioDeviceSystemEvent"},
{5, &IAudioDevice::GetActiveChannelCount, "GetActiveChannelCount"},
{6, &IAudioDevice::ListAudioDeviceName,
"ListAudioDeviceNameAuto"}, // TODO(ogniK): Confirm if autos are identical to non auto
{6, &IAudioDevice::ListAudioDeviceName, "ListAudioDeviceNameAuto"},
{7, &IAudioDevice::SetAudioDeviceOutputVolume, "SetAudioDeviceOutputVolumeAuto"},
{8, nullptr, "GetAudioDeviceOutputVolumeAuto"},
{10, &IAudioDevice::GetActiveAudioDeviceName, "GetActiveAudioDeviceNameAuto"},
{11, nullptr, "QueryAudioDeviceInputEvent"},
{12, nullptr, "QueryAudioDeviceOutputEvent"},
{12, &IAudioDevice::QueryAudioDeviceOutputEvent, "QueryAudioDeviceOutputEvent"},
{13, nullptr, "GetAudioSystemMasterVolumeSetting"},
};
RegisterHandlers(functions);
@@ -181,6 +181,11 @@ public:
auto& kernel = Core::System::GetInstance().Kernel();
buffer_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic,
"IAudioOutBufferReleasedEvent");
// Should only be signalled when an audio output device has been changed, example: speaker
// to headset
audio_output_device_switch_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Automatic, "IAudioDevice:AudioOutputDeviceSwitchedEvent");
}
private:
@@ -237,7 +242,16 @@ private:
rb.Push<u32>(1);
}
void QueryAudioDeviceOutputEvent(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Audio, "called");
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(audio_output_device_switch_event.readable);
}
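// Hypothetical sketch, not part of this diff: nothing in this change signals the new event yet,
// so QueryAudioDeviceOutputEvent only hands the guest an event that never fires. If the emulated
// output device were ever switched at runtime, the signalling side would look roughly like this
// (the hook name is invented for illustration; Signal() on the writable end is assumed).
void OnAudioOutputDeviceChanged() {
    // Wake any guest thread waiting on the event returned by QueryAudioDeviceOutputEvent.
    audio_output_device_switch_event.writable->Signal();
}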
Kernel::EventPair buffer_event;
Kernel::EventPair audio_output_device_switch_event;
}; // class IAudioDevice
@@ -594,7 +608,7 @@ void AudRenU::OpenAudioRendererImpl(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<IAudioRenderer>(params);
rb.PushIpcInterface<IAudioRenderer>(params, audren_instance_count++);
}
bool AudRenU::IsFeatureSupported(AudioFeatures feature, u32_le revision) const {

View File

@@ -33,6 +33,7 @@ private:
};
bool IsFeatureSupported(AudioFeatures feature, u32_le revision) const;
std::size_t audren_instance_count = 0;
};
} // namespace Service::Audio

View File

@@ -472,12 +472,12 @@ void CreateFactories(FileSys::VfsFilesystem& vfs, bool overwrite) {
}
}
void InstallInterfaces(SM::ServiceManager& service_manager, FileSys::VfsFilesystem& vfs) {
void InstallInterfaces(Core::System& system) {
romfs_factory = nullptr;
CreateFactories(vfs, false);
std::make_shared<FSP_LDR>()->InstallAsService(service_manager);
std::make_shared<FSP_PR>()->InstallAsService(service_manager);
std::make_shared<FSP_SRV>()->InstallAsService(service_manager);
CreateFactories(*system.GetFilesystem(), false);
std::make_shared<FSP_LDR>()->InstallAsService(system.ServiceManager());
std::make_shared<FSP_PR>()->InstallAsService(system.ServiceManager());
std::make_shared<FSP_SRV>(system.GetReporter())->InstallAsService(system.ServiceManager());
}
} // namespace Service::FileSystem

View File

@@ -65,7 +65,7 @@ FileSys::VirtualDir GetModificationDumpRoot(u64 title_id);
// above is called.
void CreateFactories(FileSys::VfsFilesystem& vfs, bool overwrite = true);
void InstallInterfaces(SM::ServiceManager& service_manager, FileSys::VfsFilesystem& vfs);
void InstallInterfaces(Core::System& system);
// A class that wraps a VfsDirectory with methods that return ResultVal and ResultCode instead of
// pointers and booleans. This makes using a VfsDirectory with switch services much easier and

View File

@@ -26,6 +26,7 @@
#include "core/hle/kernel/process.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/filesystem/fsp_srv.h"
#include "core/reporter.h"
namespace Service::FileSystem {
@@ -613,7 +614,7 @@ private:
u64 next_entry_index = 0;
};
FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") {
FSP_SRV::FSP_SRV(const Core::Reporter& reporter) : ServiceFramework("fsp-srv"), reporter(reporter) {
// clang-format off
static const FunctionInfo functions[] = {
{0, nullptr, "OpenFileSystem"},
@@ -710,14 +711,14 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") {
{1001, nullptr, "SetSaveDataSize"},
{1002, nullptr, "SetSaveDataRootPath"},
{1003, nullptr, "DisableAutoSaveDataCreation"},
{1004, nullptr, "SetGlobalAccessLogMode"},
{1004, &FSP_SRV::SetGlobalAccessLogMode, "SetGlobalAccessLogMode"},
{1005, &FSP_SRV::GetGlobalAccessLogMode, "GetGlobalAccessLogMode"},
{1006, nullptr, "OutputAccessLogToSdCard"},
{1006, &FSP_SRV::OutputAccessLogToSdCard, "OutputAccessLogToSdCard"},
{1007, nullptr, "RegisterUpdatePartition"},
{1008, nullptr, "OpenRegisteredUpdatePartition"},
{1009, nullptr, "GetAndClearMemoryReportInfo"},
{1010, nullptr, "SetDataStorageRedirectTarget"},
{1011, nullptr, "OutputAccessLogToSdCard2"},
{1011, &FSP_SRV::GetAccessLogVersionInfo, "GetAccessLogVersionInfo"},
{1100, nullptr, "OverrideSaveDataTransferTokenSignVerificationKey"},
{1110, nullptr, "CorruptSaveDataFileSystemBySaveDataSpaceId2"},
{1200, nullptr, "OpenMultiCommitManager"},
@@ -814,21 +815,22 @@ void FSP_SRV::OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext&
rb.PushIpcInterface<ISaveDataInfoReader>(std::make_shared<ISaveDataInfoReader>(space));
}
void FSP_SRV::SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
log_mode = rp.PopEnum<LogMode>();
LOG_DEBUG(Service_FS, "called, log_mode={:08X}", static_cast<u32>(log_mode));
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_FS, "(STUBBED) called");
LOG_DEBUG(Service_FS, "called");
enum class LogMode : u32 {
Off,
Log,
RedirectToSdCard,
LogToSdCard = Log | RedirectToSdCard,
};
// Given we always want to receive logging information,
// we always specify logging as enabled.
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.PushEnum(LogMode::Log);
rb.PushEnum(log_mode);
}
void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) {
@@ -902,4 +904,26 @@ void FSP_SRV::OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ct
rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND);
}
void FSP_SRV::OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx) {
const auto raw = ctx.ReadBuffer();
auto log = Common::StringFromFixedZeroTerminatedBuffer(
reinterpret_cast<const char*>(raw.data()), raw.size());
LOG_DEBUG(Service_FS, "called, log='{}'", log);
reporter.SaveFilesystemAccessReport(log_mode, std::move(log));
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void FSP_SRV::GetAccessLogVersionInfo(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_FS, "called");
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
rb.PushEnum(AccessLogVersion::Latest);
rb.Push(access_log_program_index);
}
} // namespace Service::FileSystem

View File

@@ -7,15 +7,32 @@
#include <memory>
#include "core/hle/service/service.h"
namespace Core {
class Reporter;
}
namespace FileSys {
class FileSystemBackend;
}
namespace Service::FileSystem {
enum class AccessLogVersion : u32 {
V7_0_0 = 2,
Latest = V7_0_0,
};
enum class LogMode : u32 {
Off,
Log,
RedirectToSdCard,
LogToSdCard = Log | RedirectToSdCard,
};
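// Illustrative sketch, not part of this diff: LogMode behaves as a small bit field, with
// LogToSdCard == (Log | RedirectToSdCard), so code deciding where to send the access log can
// mask for the redirect bit. The helper name below is hypothetical.
constexpr bool WantsSdCardRedirection(LogMode mode) {
    return (static_cast<u32>(mode) & static_cast<u32>(LogMode::RedirectToSdCard)) != 0;
}
static_assert(WantsSdCardRedirection(LogMode::LogToSdCard), "LogToSdCard implies redirection");
static_assert(!WantsSdCardRedirection(LogMode::Log), "plain Log does not redirect");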
class FSP_SRV final : public ServiceFramework<FSP_SRV> {
public:
explicit FSP_SRV();
explicit FSP_SRV(const Core::Reporter& reporter);
~FSP_SRV() override;
private:
@@ -26,13 +43,20 @@ private:
void OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx);
void OpenReadOnlySaveDataFileSystem(Kernel::HLERequestContext& ctx);
void OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext& ctx);
void SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
void GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
void OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
void OpenDataStorageByDataId(Kernel::HLERequestContext& ctx);
void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
void OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx);
void GetAccessLogVersionInfo(Kernel::HLERequestContext& ctx);
FileSys::VirtualFile romfs;
u64 current_process_id = 0;
u32 access_log_program_index = 0;
LogMode log_mode = LogMode::LogToSdCard;
const Core::Reporter& reporter;
};
} // namespace Service::FileSystem

View File

@@ -0,0 +1,12 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/result.h"
namespace Service::Friend {
constexpr ResultCode ERR_NO_NOTIFICATIONS{ErrorModule::Account, 15};
}

View File

@@ -2,8 +2,13 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <queue>
#include "common/logging/log.h"
#include "common/uuid.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/friend/errors.h"
#include "core/hle/service/friend/friend.h"
#include "core/hle/service/friend/interface.h"
@@ -17,7 +22,7 @@ public:
{0, nullptr, "GetCompletionEvent"},
{1, nullptr, "Cancel"},
{10100, nullptr, "GetFriendListIds"},
{10101, nullptr, "GetFriendList"},
{10101, &IFriendService::GetFriendList, "GetFriendList"},
{10102, nullptr, "UpdateFriendInfo"},
{10110, nullptr, "GetFriendProfileImage"},
{10200, nullptr, "SendFriendRequestForApplication"},
@@ -94,6 +99,23 @@ public:
}
private:
enum class PresenceFilter : u32 {
None = 0,
Online = 1,
OnlinePlay = 2,
OnlineOrOnlinePlay = 3,
};
struct SizedFriendFilter {
PresenceFilter presence;
u8 is_favorite;
u8 same_app;
u8 same_app_played;
u8 arbitary_app_played;
u64 group_id;
};
static_assert(sizeof(SizedFriendFilter) == 0x10, "SizedFriendFilter is an invalid size");
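// Layout note (illustrative, not part of this change): with natural alignment the fields pack as
// presence @ 0x0 (4 bytes), is_favorite @ 0x4, same_app @ 0x5, same_app_played @ 0x6,
// arbitary_app_played @ 0x7 and group_id @ 0x8 (8 bytes), for 0x10 bytes total -- exactly what
// the static_assert above verifies.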
void DeclareCloseOnlinePlaySession(Kernel::HLERequestContext& ctx) {
// Stub used by Splatoon 2
LOG_WARNING(Service_ACC, "(STUBBED) called");
@@ -107,6 +129,121 @@ private:
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void GetFriendList(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto friend_offset = rp.Pop<u32>();
const auto uuid = rp.PopRaw<Common::UUID>();
[[maybe_unused]] const auto filter = rp.PopRaw<SizedFriendFilter>();
const auto pid = rp.Pop<u64>();
LOG_WARNING(Service_ACC, "(STUBBED) called, offset={}, uuid={}, pid={}", friend_offset,
uuid.Format(), pid);
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push<u32>(0); // Friend count
// TODO(ogniK): Return a buffer of u64s which are the "NetworkServiceAccountId"
}
};
class INotificationService final : public ServiceFramework<INotificationService> {
public:
INotificationService(Common::UUID uuid) : ServiceFramework("INotificationService"), uuid(uuid) {
// clang-format off
static const FunctionInfo functions[] = {
{0, &INotificationService::GetEvent, "GetEvent"},
{1, &INotificationService::Clear, "Clear"},
{2, &INotificationService::Pop, "Pop"}
};
// clang-format on
RegisterHandlers(functions);
}
private:
void GetEvent(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_ACC, "called");
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
if (!is_event_created) {
auto& kernel = Core::System::GetInstance().Kernel();
notification_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, "INotificationService:NotifyEvent");
is_event_created = true;
}
rb.PushCopyObjects(notification_event.readable);
}
void Clear(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_ACC, "called");
while (!notifications.empty()) {
notifications.pop();
}
std::memset(&states, 0, sizeof(States));
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Pop(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_ACC, "called");
if (notifications.empty()) {
LOG_ERROR(Service_ACC, "No notifications in queue!");
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(ERR_NO_NOTIFICATIONS);
return;
}
const auto notification = notifications.front();
notifications.pop();
switch (notification.notification_type) {
case NotificationTypes::HasUpdatedFriendsList:
states.has_updated_friends = false;
break;
case NotificationTypes::HasReceivedFriendRequest:
states.has_received_friend_request = false;
break;
default:
// HOS does not appear to have an error case for an unknown notification
LOG_WARNING(Service_ACC, "Unknown notification {:08X}",
static_cast<u32>(notification.notification_type));
break;
}
IPC::ResponseBuilder rb{ctx, 6};
rb.Push(RESULT_SUCCESS);
rb.PushRaw<SizedNotificationInfo>(notification);
}
enum class NotificationTypes : u32 {
HasUpdatedFriendsList = 0x65,
HasReceivedFriendRequest = 0x1
};
struct SizedNotificationInfo {
NotificationTypes notification_type;
INSERT_PADDING_WORDS(
1); // TODO(ogniK): This doesn't seem to be used within any IPC returns as of now
u64_le account_id;
};
static_assert(sizeof(SizedNotificationInfo) == 0x10,
"SizedNotificationInfo is an incorrect size");
struct States {
bool has_updated_friends;
bool has_received_friend_request;
};
Common::UUID uuid;
bool is_event_created = false;
Kernel::EventPair notification_event;
std::queue<SizedNotificationInfo> notifications;
States states{};
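// Hypothetical helper, not part of this diff: in the code shown here nothing ever enqueues a
// notification, so Pop() can only ever reply with ERR_NO_NOTIFICATIONS. A future producer would
// pair a queue push with the state flag and the event signal, roughly as below (assumes the
// writable end exposes Signal()).
void PushNotification(NotificationTypes type, u64 account_id) {
    SizedNotificationInfo info{};
    info.notification_type = type;
    info.account_id = account_id;
    notifications.push(info);
    if (type == NotificationTypes::HasUpdatedFriendsList) {
        states.has_updated_friends = true;
    } else if (type == NotificationTypes::HasReceivedFriendRequest) {
        states.has_received_friend_request = true;
    }
    if (is_event_created) {
        notification_event.writable->Signal();
    }
}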
};
void Module::Interface::CreateFriendService(Kernel::HLERequestContext& ctx) {
@@ -116,6 +253,17 @@ void Module::Interface::CreateFriendService(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_ACC, "called");
}
void Module::Interface::CreateNotificationService(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
auto uuid = rp.PopRaw<Common::UUID>();
LOG_DEBUG(Service_ACC, "called, uuid={}", uuid.Format());
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
rb.PushIpcInterface<INotificationService>(uuid);
}
Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
: ServiceFramework(name), module(std::move(module)) {}

View File

@@ -16,6 +16,7 @@ public:
~Interface() override;
void CreateFriendService(Kernel::HLERequestContext& ctx);
void CreateNotificationService(Kernel::HLERequestContext& ctx);
protected:
std::shared_ptr<Module> module;

View File

@@ -10,7 +10,7 @@ Friend::Friend(std::shared_ptr<Module> module, const char* name)
: Interface(std::move(module), name) {
static const FunctionInfo functions[] = {
{0, &Friend::CreateFriendService, "CreateFriendService"},
{1, nullptr, "CreateNotificationService"},
{1, &Friend::CreateNotificationService, "CreateNotificationService"},
{2, nullptr, "CreateDaemonSuspendSessionService"},
};
RegisterHandlers(functions);

View File

@@ -548,6 +548,37 @@ void Controller_NPad::DisconnectNPad(u32 npad_id) {
connected_controllers[NPadIdToIndex(npad_id)].is_connected = false;
}
void Controller_NPad::StartLRAssignmentMode() {
// Nothing internal is tracked for LR assignment mode beyond this flag. Since the controller
// types can already be set from boot, there is no need to show a selection screen.
is_in_lr_assignment_mode = true;
}
void Controller_NPad::StopLRAssignmentMode() {
is_in_lr_assignment_mode = false;
}
bool Controller_NPad::SwapNpadAssignment(u32 npad_id_1, u32 npad_id_2) {
if (npad_id_1 == NPAD_HANDHELD || npad_id_2 == NPAD_HANDHELD || npad_id_1 == NPAD_UNKNOWN ||
npad_id_2 == NPAD_UNKNOWN) {
return true;
}
const auto npad_index_1 = NPadIdToIndex(npad_id_1);
const auto npad_index_2 = NPadIdToIndex(npad_id_2);
if (!IsControllerSupported(connected_controllers[npad_index_1].type) ||
!IsControllerSupported(connected_controllers[npad_index_2].type)) {
return false;
}
std::swap(connected_controllers[npad_index_1].type, connected_controllers[npad_index_2].type);
InitNewlyAddedControler(npad_index_1);
InitNewlyAddedControler(npad_index_2);
return true;
}
bool Controller_NPad::IsControllerSupported(NPadControllerType controller) {
if (controller == NPadControllerType::Handheld) {
// Handheld is not even a supported type, let's stop here

View File

@@ -124,6 +124,10 @@ public:
void ConnectAllDisconnectedControllers();
void ClearAllControllers();
void StartLRAssignmentMode();
void StopLRAssignmentMode();
bool SwapNpadAssignment(u32 npad_id_1, u32 npad_id_2);
// Logical OR of all button presses on all controllers
// Specifically for cheat engine and other features.
u32 GetAndResetPressState();
@@ -321,5 +325,6 @@ private:
void RequestPadStateUpdate(u32 npad_id);
std::array<ControllerPad, 10> npad_pad_states{};
bool IsControllerSupported(NPadControllerType controller);
bool is_in_lr_assignment_mode{false};
};
} // namespace Service::HID

View File

@@ -0,0 +1,13 @@
// Copyright 2019 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "core/hle/result.h"
namespace Service::HID {
constexpr ResultCode ERR_NPAD_NOT_CONNECTED{ErrorModule::HID, 710};
} // namespace Service::HID

View File

@@ -16,6 +16,7 @@
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/writable_event.h"
#include "core/hle/service/hid/errors.h"
#include "core/hle/service/hid/hid.h"
#include "core/hle/service/hid/irs.h"
#include "core/hle/service/hid/xcd.h"
@@ -202,11 +203,11 @@ Hid::Hid() : ServiceFramework("hid") {
{123, nullptr, "SetNpadJoyAssignmentModeSingleByDefault"},
{124, &Hid::SetNpadJoyAssignmentModeDual, "SetNpadJoyAssignmentModeDual"},
{125, &Hid::MergeSingleJoyAsDualJoy, "MergeSingleJoyAsDualJoy"},
{126, nullptr, "StartLrAssignmentMode"},
{127, nullptr, "StopLrAssignmentMode"},
{126, &Hid::StartLrAssignmentMode, "StartLrAssignmentMode"},
{127, &Hid::StopLrAssignmentMode, "StopLrAssignmentMode"},
{128, &Hid::SetNpadHandheldActivationMode, "SetNpadHandheldActivationMode"},
{129, nullptr, "GetNpadHandheldActivationMode"},
{130, nullptr, "SwapNpadAssignment"},
{130, &Hid::SwapNpadAssignment, "SwapNpadAssignment"},
{131, nullptr, "IsUnintendedHomeButtonInputProtectionEnabled"},
{132, nullptr, "EnableUnintendedHomeButtonInputProtection"},
{133, nullptr, "SetNpadJoyAssignmentModeSingleWithDestination"},
@@ -733,6 +734,49 @@ void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) {
rb.Push(RESULT_SUCCESS);
}
void Hid::StartLrAssignmentMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.StartLRAssignmentMode();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::StopLrAssignmentMode(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
controller.StopLRAssignmentMode();
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
void Hid::SwapNpadAssignment(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto npad_1{rp.Pop<u32>()};
const auto npad_2{rp.Pop<u32>()};
const auto applet_resource_user_id{rp.Pop<u64>()};
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, npad_1={}, npad_2={}",
applet_resource_user_id, npad_1, npad_2);
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
IPC::ResponseBuilder rb{ctx, 2};
if (controller.SwapNpadAssignment(npad_1, npad_2)) {
rb.Push(RESULT_SUCCESS);
} else {
LOG_ERROR(Service_HID, "Npads are not connected!");
rb.Push(ERR_NPAD_NOT_CONNECTED);
}
}
class HidDbg final : public ServiceFramework<HidDbg> {
public:
explicit HidDbg() : ServiceFramework{"hid:dbg"} {

View File

@@ -119,6 +119,9 @@ private:
void StopSixAxisSensor(Kernel::HLERequestContext& ctx);
void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx);
void SetPalmaBoostMode(Kernel::HLERequestContext& ctx);
void StartLrAssignmentMode(Kernel::HLERequestContext& ctx);
void StopLrAssignmentMode(Kernel::HLERequestContext& ctx);
void SwapNpadAssignment(Kernel::HLERequestContext& ctx);
std::shared_ptr<IAppletResource> applet_resource;
};

View File

@@ -345,14 +345,16 @@ public:
vm_manager
.MirrorMemory(*map_address, nro_address, nro_size, Kernel::MemoryState::ModuleCode)
.IsSuccess());
ASSERT(vm_manager.UnmapRange(nro_address, nro_size).IsSuccess());
ASSERT(vm_manager.ReprotectRange(nro_address, nro_size, Kernel::VMAPermission::None)
.IsSuccess());
if (bss_size > 0) {
ASSERT(vm_manager
.MirrorMemory(*map_address + nro_size, bss_address, bss_size,
Kernel::MemoryState::ModuleCode)
.IsSuccess());
ASSERT(vm_manager.UnmapRange(bss_address, bss_size).IsSuccess());
ASSERT(vm_manager.ReprotectRange(bss_address, bss_size, Kernel::VMAPermission::None)
.IsSuccess());
}
vm_manager.ReprotectRange(*map_address, header.text_size,
@@ -364,7 +366,8 @@ public:
Core::System::GetInstance().InvalidateCpuInstructionCaches();
nro.insert_or_assign(*map_address, NROInfo{hash, nro_size + bss_size});
nro.insert_or_assign(*map_address,
NROInfo{hash, nro_address, nro_size, bss_address, bss_size});
IPC::ResponseBuilder rb{ctx, 4};
rb.Push(RESULT_SUCCESS);
@@ -409,9 +412,23 @@ public:
}
auto& vm_manager = Core::CurrentProcess()->VMManager();
const auto& nro_size = iter->second.size;
const auto& nro_info = iter->second;
ASSERT(vm_manager.UnmapRange(nro_address, nro_size).IsSuccess());
// Unmap the mirrored memory
ASSERT(
vm_manager.UnmapRange(nro_address, nro_info.nro_size + nro_info.bss_size).IsSuccess());
// Reprotect the source memory
ASSERT(vm_manager
.ReprotectRange(nro_info.nro_address, nro_info.nro_size,
Kernel::VMAPermission::ReadWrite)
.IsSuccess());
if (nro_info.bss_size > 0) {
ASSERT(vm_manager
.ReprotectRange(nro_info.bss_address, nro_info.bss_size,
Kernel::VMAPermission::ReadWrite)
.IsSuccess());
}
Core::System::GetInstance().InvalidateCpuInstructionCaches();
@@ -473,7 +490,10 @@ private:
struct NROInfo {
SHA256Hash hash;
u64 size;
VAddr nro_address;
u64 nro_size;
VAddr bss_address;
u64 bss_size;
};
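// Lifecycle note for the change above (descriptive only): on load, the source NRO and BSS regions
// are now reprotected to VMAPermission::None instead of being unmapped, leaving them mapped but
// inaccessible while the mirror at *map_address is in use; on unload, the mirrored range
// (nro_size + bss_size) is unmapped and the original regions are reprotected back to ReadWrite,
// which is why NROInfo now has to carry the source addresses and sizes.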
bool initialized = false;

View File

@@ -48,7 +48,7 @@ public:
{19, nullptr, "Export"},
{20, nullptr, "IsBrokenDatabaseWithClearFlag"},
{21, &IDatabaseService::GetIndex, "GetIndex"},
{22, nullptr, "SetInterfaceVersion"},
{22, &IDatabaseService::SetInterfaceVersion, "SetInterfaceVersion"},
{23, nullptr, "Convert"},
};
// clang-format on
@@ -350,8 +350,22 @@ private:
rb.Push(index);
}
void SetInterfaceVersion(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
current_interface_version = rp.PopRaw<u32>();
LOG_DEBUG(Service_Mii, "called, interface_version={:08X}", current_interface_version);
UNIMPLEMENTED_IF(current_interface_version != 1);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(RESULT_SUCCESS);
}
MiiManager db;
u32 current_interface_version = 0;
// Last read offsets of Get functions
std::array<u32, 4> offsets{};
};

Some files were not shown because too many files have changed in this diff.