Compare commits

...

66 Commits

Author SHA1 Message Date
yuzubot
ab2af23e47 Android #172 2023-12-26 00:56:59 +00:00
yuzubot
52d3d87d09 Merge PR 12467 2023-12-26 00:56:59 +00:00
yuzubot
30f76ed9c2 Merge PR 12466 2023-12-26 00:56:59 +00:00
yuzubot
c85f87ffa4 Merge PR 12449 2023-12-26 00:56:59 +00:00
yuzubot
7778f1906d Merge PR 12448 2023-12-26 00:56:59 +00:00
Fernando S
05e3db3ac9 Merge pull request #12394 from liamwhite/per-process-memory
general: properly support multiple memory instances
2023-12-24 16:23:14 +01:00
Liam
c57ae803a6 kernel: fix resource limit imbalance 2023-12-22 21:52:49 -05:00
Liam
db7b2bc8f1 kernel: restrict nce to applications 2023-12-22 21:52:49 -05:00
Liam
31bf57a310 general: properly support multiple memory instances 2023-12-22 21:52:49 -05:00
Liam
cae675343c k_server_session: remove scratch buffer usage in favor of direct copy 2023-12-22 21:52:49 -05:00
Liam
35501ba41c k_server_session: process for guest servers 2023-12-22 21:52:49 -05:00
Liam
419055e484 kernel: instantiate memory separately for each guest process 2023-12-22 21:52:49 -05:00
liamwhite
91290b9be4 Merge pull request #12412 from ameerj/gl-query-prims
OpenGL: Add GL_PRIMITIVES_GENERATED and GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN queries
2023-12-22 11:42:05 -05:00
Fernando S
820f113d9e Merge pull request #12435 from liamwhite/type-check
shader_recompiler: ensure derivatives for textureGrad are f32
2023-12-22 17:41:13 +01:00
Fernando S
373a1ff2ce Merge pull request #12410 from liamwhite/more-mali-null
renderer_vulkan: don't pass null view when nullDescriptor is not supported
2023-12-22 17:40:47 +01:00
Fernando S
4d6b6ba76c Merge pull request #12432 from liamwhite/float-write
shader_recompiler: use float image operations on load/store when required
2023-12-22 17:40:26 +01:00
Liam
4aa713e861 shader_recompiler: ensure derivatives for textureGrad are f32 2023-12-21 19:06:33 -05:00
Liam
9e9aed41be shader_recompiler: use float image operations on load/store when required 2023-12-21 14:34:46 -05:00
liamwhite
3d268b8480 Merge pull request #12424 from t895/vsync-per-game-qt
qt: settings: Fix per-game vsync combobox
2023-12-21 10:53:06 -05:00
liamwhite
ad7445d4cc Merge pull request #12425 from german77/temp-fix
service: hid: Fix crash on InitializeVibrationDevice
2023-12-21 10:50:22 -05:00
liamwhite
3a30271219 Merge pull request #12426 from t895/reload-text-fix
android: Fix "No games found" text appearing on load
2023-12-21 10:50:11 -05:00
t895
bb5196aaae qt: settings: Fix per-game vsync combobox 2023-12-21 01:15:05 -05:00
t895
d3070cafa7 android: Fix "No games found" text appearing on load 2023-12-21 00:49:22 -05:00
Narr the Reg
5cd3b6f58c service: hid: Fix crash on InitializeVibrationDevice 2023-12-20 22:52:36 -06:00
liamwhite
bedc758fe7 Merge pull request #12414 from jbeich/vk274
externals: update Vulkan-Headers to v1.3.274
2023-12-20 12:46:50 -05:00
liamwhite
76701185ad Merge pull request #12400 from ameerj/vk-query-prefix-fix
vk_query_cache: Fix prefix sum max_accumulation_limit logic
2023-12-20 12:46:41 -05:00
Fernando S
f1cb14eb54 Merge pull request #12417 from liamwhite/arm64-gcc-fix
nce: hide shadowing warnings from dynarmic headers
2023-12-20 18:46:08 +01:00
Fernando S
f4f4a469a9 Merge pull request #12409 from liamwhite/bits-and-bytes
nce: fix read size in simd immediate emulation
2023-12-20 18:45:44 +01:00
Fernando S
9e5b4052ed Merge pull request #12403 from liamwhite/clipdistance
shader_recompiler: use minimal clip distance array
2023-12-20 18:45:20 +01:00
Fernando S
234867b84d Merge pull request #12390 from liamwhite/binding-insanity
renderer_vulkan: work around turnip binding bug in a610
2023-12-20 18:44:47 +01:00
Ameer J
61e8c5f798 gl_rasterizer: Less spammy log for unimplemented resets 2023-12-20 11:51:44 -05:00
Liam
4b60aec190 nce: hide shadowing warnings from dynarmic headers 2023-12-20 11:07:50 -05:00
Jan Beich
ecfba79d98 externals: update Vulkan-Headers to v1.3.274 2023-12-20 01:13:09 +01:00
Jan Beich
310834aea2 vulkan_common: unbreak build with Vulkan-Headers 1.3.274
src/video_core/vulkan_common/vulkan_wrapper.cpp:293:13: error: enumeration value 'VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR' not handled in switch [-Werror,-Wswitch]
    switch (result) {
            ^~~~~~
2023-12-20 01:12:41 +01:00
liamwhite
6a1fa9bb17 Merge pull request #12411 from ameerj/gl-nv-tfb-fixups
gl_buffer_cache: Reintroduce NV_vertex_buffer_unified_memory
2023-12-19 18:36:50 -05:00
Ameer J
db8a601cf8 OpenGL: Add GL_PRIMITIVES_GENERATED and GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN queries 2023-12-19 17:32:31 -05:00
Ameer J
1bb76201e6 gl_rasterizer: Silence spammy logs 2023-12-19 17:13:23 -05:00
Ameer J
372bca5945 gl_buffer_cache: Reintroduce NV_vertex_buffer_unified_memory
Workaround Nvidia drivers complaining when a buffer is bound as both a vertex buffer and transform feedback buffer
2023-12-19 17:13:23 -05:00
Liam
93c19a40bf nce: increase handler stack size 2023-12-19 15:24:13 -05:00
Liam
d0a75580da renderer_vulkan: don't pass null view when nullDescriptor is not supported 2023-12-19 15:13:10 -05:00
Charles Lombardo
345ec25532 Merge pull request #12408 from german77/lang
yuzu: Read/Save category Paths
2023-12-19 14:40:10 -05:00
Liam
a94721fde0 nce: fix read size in simd immediate emulation 2023-12-19 12:51:19 -05:00
Narr the Reg
816c7a8d1f yuzu: Read/Save category Paths 2023-12-19 11:34:53 -06:00
Fernando S
efe52db690 Merge pull request #12382 from liamwhite/image-limit
renderer_vulkan: allow up to 7 swapchain images
2023-12-19 16:15:40 +01:00
Fernando S
d61df0f400 Merge pull request #12387 from liamwhite/oboe
android: add oboe audio sink
2023-12-19 16:15:07 +01:00
Fernando S
b14547b8b6 Merge pull request #12392 from liamwhite/mode
fs: implement OpenDirectoryMode
2023-12-19 16:14:29 +01:00
Fernando S
97ad3e7530 Merge pull request #12391 from yuzu-emu/revert-12344-its-free-real-estate
Revert "video_core: use interval map for page count tracking"
2023-12-19 16:14:09 +01:00
Fernando S
0589a32f75 Merge pull request #12304 from liamwhite/flinger-wtf
nvnflinger: mark buffer as acquired when acquired
2023-12-19 16:12:56 +01:00
liamwhite
617dc0f822 Merge pull request #12402 from german77/lang
yuzu: Make language persistent and remove symbols_path
2023-12-18 23:10:59 -05:00
Liam
fcfa8b680b shader_recompiler: use minimal clip distance array 2023-12-18 22:25:14 -05:00
Liam
94244437de shader_recompiler: ignore clip distances beyond driver support level 2023-12-18 22:25:14 -05:00
Narr the Reg
53956a2990 yuzu: Make language persistent and remove symbols_path 2023-12-18 20:28:55 -06:00
Liam
a7731abb72 oboe_sink: specify additional required parameters 2023-12-18 17:27:32 -05:00
liamwhite
50fd029eaa Merge pull request #12349 from Kelebek1/return_system_channels_active
Have GetActiveChannelCount return the system channels instead of host device channels
2023-12-18 15:06:16 -05:00
Ameer J
a2b567dfd6 vk_query_cache: Fix prefix sum max_accumulation_limit logic 2023-12-18 12:37:55 -05:00
Liam
b770f6a985 fs: implement OpenDirectoryMode 2023-12-18 00:12:38 -05:00
Liam
797e8fdbc3 oboe_sink: set low latency performance mode 2023-12-17 21:05:00 -05:00
liamwhite
65e646eeba Revert "video_core: use interval map for page count tracking" 2023-12-17 18:59:49 -05:00
Liam
fba3fa705d renderer_vulkan: work around turnip binding bug in a610 2023-12-17 15:45:09 -05:00
Liam
6ca530a721 android: add oboe to audio configuration 2023-12-17 11:44:49 -05:00
Liam
e01c535178 oboe_sink: implement channel count querying 2023-12-17 10:10:14 -05:00
Liam
7239547ead android: add oboe audio sink 2023-12-17 01:42:59 -05:00
Liam
7fc06260d1 renderer_vulkan: allow up to 7 swapchain images 2023-12-16 18:59:44 -05:00
Liam
fcc85abe27 nvnflinger: mark buffer as acquired when acquired 2023-12-16 13:40:04 -05:00
Liam
6851e93296 audio: skip coefficient normalization for downmix 2023-12-16 13:05:55 -05:00
Kelebek1
ffbba74c91 Have GetActiveChannelCount return the system channels instead of host device channels 2023-12-16 12:49:28 -05:00
121 changed files with 3300 additions and 977 deletions

View File

@@ -142,6 +142,9 @@ if (YUZU_USE_BUNDLED_VCPKG)
if (ENABLE_WEB_SERVICE)
list(APPEND VCPKG_MANIFEST_FEATURES "web-service")
endif()
if (ANDROID)
list(APPEND VCPKG_MANIFEST_FEATURES "android")
endif()
include(${CMAKE_SOURCE_DIR}/externals/vcpkg/scripts/buildsystems/vcpkg.cmake)
elseif(NOT "$ENV{VCPKG_TOOLCHAIN_FILE}" STREQUAL "")
@@ -302,7 +305,7 @@ find_package(ZLIB 1.2 REQUIRED)
find_package(zstd 1.5 REQUIRED)
if (NOT YUZU_USE_EXTERNAL_VULKAN_HEADERS)
find_package(Vulkan 1.3.256 REQUIRED)
find_package(Vulkan 1.3.274 REQUIRED)
endif()
if (ENABLE_LIBUSB)

View File

@@ -1,3 +1,15 @@
| Pull Request | Commit | Title | Author | Merged? |
|----|----|----|----|----|
| [12448](https://github.com/yuzu-emu/yuzu//pull/12448) | [`b1d4804c0`](https://github.com/yuzu-emu/yuzu//pull/12448/files) | renderer_vulkan: demote format assert to error log | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12449](https://github.com/yuzu-emu/yuzu//pull/12449) | [`6a1ddc502`](https://github.com/yuzu-emu/yuzu//pull/12449/files) | renderer_vulkan: skip SetObjectNameEXT on unsupported driver | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12466](https://github.com/yuzu-emu/yuzu//pull/12466) | [`5f3720138`](https://github.com/yuzu-emu/yuzu//pull/12466/files) | core: track separate heap allocation for linux | [liamwhite](https://github.com/liamwhite/) | Yes |
| [12467](https://github.com/yuzu-emu/yuzu//pull/12467) | [`cfc6c5f8f`](https://github.com/yuzu-emu/yuzu//pull/12467/files) | Revert " shader_recompiler: use minimal clip distance array " | [liamwhite](https://github.com/liamwhite/) | Yes |
End of merge log. You can find the original README.md below the break.
-----
<!--
SPDX-FileCopyrightText: 2018 yuzu Emulator Project
SPDX-License-Identifier: GPL-2.0-or-later

View File

@@ -91,18 +91,20 @@ class GamesFragment : Fragment() {
viewLifecycleOwner.lifecycleScope.apply {
launch {
repeatOnLifecycle(Lifecycle.State.RESUMED) {
gamesViewModel.isReloading.collect { binding.swipeRefresh.isRefreshing = it }
gamesViewModel.isReloading.collect {
binding.swipeRefresh.isRefreshing = it
if (gamesViewModel.games.value.isEmpty() && !it) {
binding.noticeText.visibility = View.VISIBLE
} else {
binding.noticeText.visibility = View.INVISIBLE
}
}
}
}
launch {
repeatOnLifecycle(Lifecycle.State.RESUMED) {
gamesViewModel.games.collectLatest {
(binding.gridGames.adapter as GameAdapter).submitList(it)
if (it.isEmpty()) {
binding.noticeText.visibility = View.VISIBLE
} else {
binding.noticeText.visibility = View.GONE
}
}
}
}

View File

@@ -256,11 +256,13 @@
<string-array name="outputEngineEntries">
<item>@string/auto</item>
<item>@string/oboe</item>
<item>@string/cubeb</item>
<item>@string/string_null</item>
</string-array>
<integer-array name="outputEngineValues">
<item>0</item>
<item>4</item>
<item>1</item>
<item>3</item>
</integer-array>
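For context, the integer values above pair with Settings::AudioEngine as changed later in this diff. The sketch below is an inference (Auto = 0 is presumed, since only Cubeb, Sdl2, Null and Oboe appear in the visible settings hunk); it explains why the new oboe entry maps to 4 and why sdl2 (2) is absent from the Android list.

#include <cstdint>

// Presumed numeric mapping behind outputEngineValues above (u32 in the codebase,
// std::uint32_t here); matches the AudioEngine declaration order shown later in
// src/common/settings_enums.h.
enum class AudioEngine : std::uint32_t {
    Auto = 0,  // "@string/auto"        -> <item>0</item>
    Cubeb = 1, // "@string/cubeb"       -> <item>1</item>
    Sdl2 = 2,  // not exposed in the Android UI
    Null = 3,  // "@string/string_null" -> <item>3</item>
    Oboe = 4,  // "@string/oboe"        -> <item>4</item>, newly appended after Null
};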

View File

@@ -503,6 +503,7 @@
<string name="theme_mode_dark">Dark</string>
<!-- Audio output engines -->
<string name="oboe">oboe</string>
<string name="cubeb">cubeb</string>
<!-- Black backgrounds theme -->

View File

@@ -253,6 +253,17 @@ if (ENABLE_SDL2)
target_compile_definitions(audio_core PRIVATE HAVE_SDL2)
endif()
if (ANDROID)
target_sources(audio_core PRIVATE
sink/oboe_sink.cpp
sink/oboe_sink.h
)
# FIXME: this port seems broken, it cannot be imported with find_package(oboe REQUIRED)
target_link_libraries(audio_core PRIVATE "${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/lib/liboboe.a")
target_compile_definitions(audio_core PRIVATE HAVE_OBOE)
endif()
if (YUZU_USE_PRECOMPILED_HEADERS)
target_precompile_headers(audio_core PRIVATE precompiled_headers.h)
endif()

View File

@@ -253,8 +253,9 @@ CubebSink::~CubebSink() {
#endif
}
SinkStream* CubebSink::AcquireSinkStream(Core::System& system, u32 system_channels,
SinkStream* CubebSink::AcquireSinkStream(Core::System& system, u32 system_channels_,
const std::string& name, StreamType type) {
system_channels = system_channels_;
SinkStreamPtr& stream = sink_streams.emplace_back(std::make_unique<CubebSinkStream>(
ctx, device_channels, system_channels, output_device, input_device, name, type, system));

View File

@@ -0,0 +1,223 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <span>
#include <vector>
#include <oboe/Oboe.h>
#include "audio_core/common/common.h"
#include "audio_core/sink/oboe_sink.h"
#include "audio_core/sink/sink_stream.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/core.h"
namespace AudioCore::Sink {
class OboeSinkStream final : public SinkStream,
public oboe::AudioStreamDataCallback,
public oboe::AudioStreamErrorCallback {
public:
explicit OboeSinkStream(Core::System& system_, StreamType type_, const std::string& name_,
u32 system_channels_)
: SinkStream(system_, type_) {
name = name_;
system_channels = system_channels_;
this->OpenStream();
}
~OboeSinkStream() override {
LOG_INFO(Audio_Sink, "Destroyed Oboe stream");
}
void Finalize() override {
this->Stop();
m_stream.reset();
}
void Start(bool resume = false) override {
if (!m_stream || !paused) {
return;
}
paused = false;
if (m_stream->start() != oboe::Result::OK) {
LOG_CRITICAL(Audio_Sink, "Error starting Oboe stream");
}
}
void Stop() override {
if (!m_stream || paused) {
return;
}
this->SignalPause();
if (m_stream->stop() != oboe::Result::OK) {
LOG_CRITICAL(Audio_Sink, "Error stopping Oboe stream");
}
}
public:
static s32 QueryChannelCount(oboe::Direction direction) {
std::shared_ptr<oboe::AudioStream> temp_stream;
oboe::AudioStreamBuilder builder;
const auto result = ConfigureBuilder(builder, direction)->openStream(temp_stream);
ASSERT(result == oboe::Result::OK);
return temp_stream->getChannelCount() >= 6 ? 6 : 2;
}
protected:
oboe::DataCallbackResult onAudioReady(oboe::AudioStream*, void* audio_data,
s32 num_buffer_frames) override {
const size_t num_channels = this->GetDeviceChannels();
const size_t frame_size = num_channels;
const size_t num_frames = static_cast<size_t>(num_buffer_frames);
if (type == StreamType::In) {
std::span<const s16> input_buffer{reinterpret_cast<const s16*>(audio_data),
num_frames * frame_size};
this->ProcessAudioIn(input_buffer, num_frames);
} else {
std::span<s16> output_buffer{reinterpret_cast<s16*>(audio_data),
num_frames * frame_size};
this->ProcessAudioOutAndRender(output_buffer, num_frames);
}
return oboe::DataCallbackResult::Continue;
}
void onErrorAfterClose(oboe::AudioStream*, oboe::Result) override {
LOG_INFO(Audio_Sink, "Audio stream closed, reinitializing");
if (this->OpenStream()) {
m_stream->start();
}
}
private:
static oboe::AudioStreamBuilder* ConfigureBuilder(oboe::AudioStreamBuilder& builder,
oboe::Direction direction) {
// TODO: investigate callback delay issues when using AAudio
return builder.setPerformanceMode(oboe::PerformanceMode::LowLatency)
->setAudioApi(oboe::AudioApi::OpenSLES)
->setDirection(direction)
->setSampleRate(TargetSampleRate)
->setSampleRateConversionQuality(oboe::SampleRateConversionQuality::High)
->setFormat(oboe::AudioFormat::I16)
->setFormatConversionAllowed(true)
->setUsage(oboe::Usage::Game)
->setBufferCapacityInFrames(TargetSampleCount * 2);
}
bool OpenStream() {
const auto direction = [&]() {
switch (type) {
case StreamType::In:
return oboe::Direction::Input;
case StreamType::Out:
case StreamType::Render:
return oboe::Direction::Output;
default:
ASSERT(false);
return oboe::Direction::Output;
}
}();
const auto expected_channels = QueryChannelCount(direction);
const auto expected_mask = [&]() {
switch (expected_channels) {
case 1:
return oboe::ChannelMask::Mono;
case 2:
return oboe::ChannelMask::Stereo;
case 6:
return oboe::ChannelMask::CM5Point1;
default:
ASSERT(false);
return oboe::ChannelMask::Unspecified;
}
}();
oboe::AudioStreamBuilder builder;
const auto result = ConfigureBuilder(builder, direction)
->setChannelCount(expected_channels)
->setChannelMask(expected_mask)
->setChannelConversionAllowed(true)
->setDataCallback(this)
->setErrorCallback(this)
->openStream(m_stream);
ASSERT(result == oboe::Result::OK);
return result == oboe::Result::OK && this->SetStreamProperties();
}
bool SetStreamProperties() {
ASSERT(m_stream);
m_stream->setBufferSizeInFrames(TargetSampleCount * 2);
device_channels = m_stream->getChannelCount();
const auto sample_rate = m_stream->getSampleRate();
const auto buffer_capacity = m_stream->getBufferCapacityInFrames();
const auto stream_backend =
m_stream->getAudioApi() == oboe::AudioApi::AAudio ? "AAudio" : "OpenSLES";
LOG_INFO(Audio_Sink, "Opened Oboe {} stream with {} channels sample rate {} capacity {}",
stream_backend, device_channels, sample_rate, buffer_capacity);
return true;
}
std::shared_ptr<oboe::AudioStream> m_stream{};
};
OboeSink::OboeSink() {
// TODO: This is not generally knowable
// The channel count is distinct based on direction and can change
device_channels = OboeSinkStream::QueryChannelCount(oboe::Direction::Output);
}
OboeSink::~OboeSink() = default;
SinkStream* OboeSink::AcquireSinkStream(Core::System& system, u32 system_channels,
const std::string& name, StreamType type) {
SinkStreamPtr& stream = sink_streams.emplace_back(
std::make_unique<OboeSinkStream>(system, type, name, system_channels));
return stream.get();
}
void OboeSink::CloseStream(SinkStream* to_remove) {
sink_streams.remove_if([&](auto& stream) { return stream.get() == to_remove; });
}
void OboeSink::CloseStreams() {
sink_streams.clear();
}
f32 OboeSink::GetDeviceVolume() const {
if (sink_streams.empty()) {
return 1.0f;
}
return sink_streams.front()->GetDeviceVolume();
}
void OboeSink::SetDeviceVolume(f32 volume) {
for (auto& stream : sink_streams) {
stream->SetDeviceVolume(volume);
}
}
void OboeSink::SetSystemVolume(f32 volume) {
for (auto& stream : sink_streams) {
stream->SetSystemVolume(volume);
}
}
} // namespace AudioCore::Sink

View File

@@ -0,0 +1,75 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <list>
#include <string>
#include "audio_core/sink/sink.h"
namespace Core {
class System;
}
namespace AudioCore::Sink {
class SinkStream;
class OboeSink final : public Sink {
public:
explicit OboeSink();
~OboeSink() override;
/**
* Create a new sink stream.
*
* @param system - Core system.
* @param system_channels - Number of channels the audio system expects.
* May differ from the device's channel count.
* @param name - Name of this stream.
* @param type - Type of this stream, render/in/out.
*
* @return A pointer to the created SinkStream
*/
SinkStream* AcquireSinkStream(Core::System& system, u32 system_channels,
const std::string& name, StreamType type) override;
/**
* Close a given stream.
*
* @param stream - The stream to close.
*/
void CloseStream(SinkStream* stream) override;
/**
* Close all streams.
*/
void CloseStreams() override;
/**
* Get the device volume. Set from calls to the IAudioDevice service.
*
* @return Volume of the device.
*/
f32 GetDeviceVolume() const override;
/**
* Set the device volume. Set from calls to the IAudioDevice service.
*
* @param volume - New volume of the device.
*/
void SetDeviceVolume(f32 volume) override;
/**
* Set the system volume. Comes from the audio system using this stream.
*
* @param volume - New volume of the system.
*/
void SetSystemVolume(f32 volume) override;
private:
/// List of streams managed by this sink
std::list<SinkStreamPtr> sink_streams{};
};
} // namespace AudioCore::Sink
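For reference, a minimal usage sketch of the interface declared above (not part of the change; the Core::System reference and the stream name are placeholders):

// Illustrative only -- assumes a valid Core::System instance is available.
#include "audio_core/sink/oboe_sink.h"
#include "audio_core/sink/sink_stream.h"

void PlayExample(Core::System& system) {
    AudioCore::Sink::OboeSink sink;
    AudioCore::Sink::SinkStream* stream = sink.AcquireSinkStream(
        system, /*system_channels=*/2, "example", AudioCore::Sink::StreamType::Out);
    stream->Start();
    // ... feed audio through the SinkStream interface ...
    sink.CloseStream(stream);
}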

View File

@@ -168,8 +168,9 @@ SDLSink::SDLSink(std::string_view target_device_name) {
SDLSink::~SDLSink() = default;
SinkStream* SDLSink::AcquireSinkStream(Core::System& system, u32 system_channels,
SinkStream* SDLSink::AcquireSinkStream(Core::System& system, u32 system_channels_,
const std::string&, StreamType type) {
system_channels = system_channels_;
SinkStreamPtr& stream = sink_streams.emplace_back(std::make_unique<SDLSinkStream>(
device_channels, system_channels, output_device, input_device, type, system));
return stream.get();

View File

@@ -85,9 +85,21 @@ public:
*/
virtual void SetSystemVolume(f32 volume) = 0;
/**
* Get the number of channels the game has set, can be different to the host hardware's support.
* Either 2 or 6.
*
* @return Number of device channels.
*/
u32 GetSystemChannels() const {
return system_channels;
}
protected:
/// Number of device channels supported by the hardware
u32 device_channels{2};
/// Number of channels the game is sending
u32 system_channels{2};
};
using SinkPtr = std::unique_ptr<Sink>;

View File

@@ -7,6 +7,9 @@
#include <vector>
#include "audio_core/sink/sink_details.h"
#ifdef HAVE_OBOE
#include "audio_core/sink/oboe_sink.h"
#endif
#ifdef HAVE_CUBEB
#include "audio_core/sink/cubeb_sink.h"
#endif
@@ -36,6 +39,16 @@ struct SinkDetails {
// sink_details is ordered in terms of desirability, with the best choice at the top.
constexpr SinkDetails sink_details[] = {
#ifdef HAVE_OBOE
SinkDetails{
Settings::AudioEngine::Oboe,
[](std::string_view device_id) -> std::unique_ptr<Sink> {
return std::make_unique<OboeSink>();
},
[](bool capture) { return std::vector<std::string>{"Default"}; },
[]() { return true; },
},
#endif
#ifdef HAVE_CUBEB
SinkDetails{
Settings::AudioEngine::Cubeb,

View File

@@ -40,29 +40,36 @@ void SinkStream::AppendBuffer(SinkBuffer& buffer, std::span<s16> samples) {
if (system_channels == 6 && device_channels == 2) {
// We're given 6 channels, but our device only outputs 2, so downmix.
static constexpr std::array<f32, 4> down_mix_coeff{1.0f, 0.707f, 0.251f, 0.707f};
// Front = 1.0
// Center = 0.596
// LFE = 0.354
// Back = 0.707
static constexpr std::array<f32, 4> down_mix_coeff{1.0, 0.596f, 0.354f, 0.707f};
for (u32 read_index = 0, write_index = 0; read_index < samples.size();
read_index += system_channels, write_index += device_channels) {
const auto fl =
static_cast<f32>(samples[read_index + static_cast<u32>(Channels::FrontLeft)]);
const auto fr =
static_cast<f32>(samples[read_index + static_cast<u32>(Channels::FrontRight)]);
const auto c =
static_cast<f32>(samples[read_index + static_cast<u32>(Channels::Center)]);
const auto lfe =
static_cast<f32>(samples[read_index + static_cast<u32>(Channels::LFE)]);
const auto bl =
static_cast<f32>(samples[read_index + static_cast<u32>(Channels::BackLeft)]);
const auto br =
static_cast<f32>(samples[read_index + static_cast<u32>(Channels::BackRight)]);
const auto left_sample{
((Common::FixedPoint<49, 15>(
samples[read_index + static_cast<u32>(Channels::FrontLeft)]) *
down_mix_coeff[0] +
samples[read_index + static_cast<u32>(Channels::Center)] * down_mix_coeff[1] +
samples[read_index + static_cast<u32>(Channels::LFE)] * down_mix_coeff[2] +
samples[read_index + static_cast<u32>(Channels::BackLeft)] * down_mix_coeff[3]) *
volume)
.to_int()};
static_cast<s32>((fl * down_mix_coeff[0] + c * down_mix_coeff[1] +
lfe * down_mix_coeff[2] + bl * down_mix_coeff[3]) *
volume)};
const auto right_sample{
((Common::FixedPoint<49, 15>(
samples[read_index + static_cast<u32>(Channels::FrontRight)]) *
down_mix_coeff[0] +
samples[read_index + static_cast<u32>(Channels::Center)] * down_mix_coeff[1] +
samples[read_index + static_cast<u32>(Channels::LFE)] * down_mix_coeff[2] +
samples[read_index + static_cast<u32>(Channels::BackRight)] * down_mix_coeff[3]) *
volume)
.to_int()};
static_cast<s32>((fr * down_mix_coeff[0] + c * down_mix_coeff[1] +
lfe * down_mix_coeff[2] + br * down_mix_coeff[3]) *
volume)};
samples[write_index + static_cast<u32>(Channels::FrontLeft)] =
static_cast<s16>(std::clamp(left_sample, min, max));
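Worked out per stereo channel, the new coefficients give the following mix (a sketch of the arithmetic only; the helper functions are hypothetical and clamp to the s16 range like the code above):

// Illustrative 5.1 -> stereo downmix using the coefficients from the diff:
// Front = 1.0, Center = 0.596, LFE = 0.354, Back = 0.707.
#include <algorithm>
#include <cstdint>

std::int16_t MixLeft(float fl, float c, float lfe, float bl, float volume) {
    const float mixed = (fl * 1.0f + c * 0.596f + lfe * 0.354f + bl * 0.707f) * volume;
    return static_cast<std::int16_t>(std::clamp(mixed, -32768.0f, 32767.0f));
}

std::int16_t MixRight(float fr, float c, float lfe, float br, float volume) {
    const float mixed = (fr * 1.0f + c * 0.596f + lfe * 0.354f + br * 0.707f) * volume;
    return static_cast<std::int16_t>(std::clamp(mixed, -32768.0f, 32767.0f));
}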

View File

@@ -64,6 +64,8 @@ add_library(common STATIC
fs/path_util.cpp
fs/path_util.h
hash.h
heap_tracker.cpp
heap_tracker.h
hex_util.cpp
hex_util.h
host_memory.cpp

src/common/heap_tracker.cpp (new file, 385 lines)
View File

@@ -0,0 +1,385 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "common/heap_tracker.h"
namespace Common {
namespace {
constexpr size_t MaxResidentMapCount = 0x8000;
} // namespace
HeapTracker::HeapTracker(Common::HostMemory& buffer) : m_buffer(buffer) {}
HeapTracker::~HeapTracker() = default;
void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
MemoryPermission perm, bool is_separate_heap) {
// When mapping other memory, map pages immediately.
if (!is_separate_heap) {
m_buffer.Map(virtual_offset, host_offset, length, perm, false);
return;
}
{
// We are mapping part of a separate heap.
std::scoped_lock lk{m_lock};
auto* map = new SeparateHeapMap{
.vaddr = virtual_offset,
.paddr = host_offset,
.size = length,
.map_id = m_next_map_id++,
.tick = m_tick++,
.perm = perm,
.is_resident = false,
};
// Insert into mappings.
m_mappings.insert(*map);
}
// Finally, map.
this->DeferredMapSeparateHeap(virtual_offset);
}
void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
// If this is a separate heap...
if (is_separate_heap) {
std::scoped_lock lk{m_rebuild_lock, m_lock};
const SeparateHeapMap key{
.vaddr = virtual_offset,
.size = size,
};
// Split at the boundaries of the region we are removing.
this->SplitHeapMapLocked(virtual_offset);
this->SplitHeapMapLocked(virtual_offset + size);
// Erase all mappings in range.
auto it = m_mappings.find(key);
while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
// Get pointer to item.
SeparateHeapMap* const item = std::addressof(*it);
if (item->is_resident) {
// Unlink from resident tree.
m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
// Decrease reference count.
const auto count_it = m_resident_map_counts.find(item->map_id);
this->RemoveReferenceLocked(count_it, 1);
}
// Unlink from mapping tree and advance.
it = m_mappings.erase(it);
// Free the item.
delete item;
}
}
// Unmap pages.
m_buffer.Unmap(virtual_offset, size, false);
}
void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
// Ensure no rebuild occurs while reprotecting.
std::shared_lock lk{m_rebuild_lock};
// Split at the boundaries of the region we are reprotecting.
this->SplitHeapMap(virtual_offset, size);
// Declare tracking variables.
VAddr cur = virtual_offset;
VAddr end = virtual_offset + size;
while (cur < end) {
VAddr next = cur;
bool should_protect = false;
{
std::scoped_lock lk2{m_lock};
const SeparateHeapMap key{
.vaddr = next,
};
// Try to get the next mapping corresponding to this address.
const auto it = m_mappings.nfind_key(key);
if (it == m_mappings.end()) {
// There are no separate heap mappings remaining.
next = end;
should_protect = true;
} else if (it->vaddr == cur) {
// We are in range.
// Update permission bits.
it->perm = perm;
// Determine next address and whether we should protect.
next = cur + it->size;
should_protect = it->is_resident;
} else /* if (it->vaddr > cur) */ {
// We weren't in range, but there is a block coming up that will be.
next = it->vaddr;
should_protect = true;
}
}
// Clamp to end.
next = std::min(next, end);
// Reprotect, if we need to.
if (should_protect) {
m_buffer.Protect(cur, next - cur, perm);
}
// Advance.
cur = next;
}
}
bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
if (m_buffer.IsInVirtualRange(fault_address)) {
return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
}
return false;
}
bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
std::scoped_lock lk{m_lock};
while (this->IsEvictRequiredLocked()) {
// Unlock before we rebuild to ensure proper lock ordering.
m_lock.unlock();
// Evict four maps.
for (size_t i = 0; i < 4; /* ... */) {
i += this->EvictSingleSeparateHeapMap();
}
// Lock again.
m_lock.lock();
}
// Check to ensure this was a non-resident separate heap mapping.
const auto it = this->GetNearestHeapMapLocked(virtual_offset);
if (it == m_mappings.end()) {
// Not in any separate heap.
return false;
}
if (it->is_resident) {
// Already mapped and shouldn't be considered again.
return false;
}
// Map the area.
m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
// This map is now resident.
this->AddReferenceLocked(it->map_id, 1);
it->is_resident = true;
it->tick = m_tick++;
// Insert into resident maps.
m_resident_mappings.insert(*it);
// We succeeded.
return true;
}
bool HeapTracker::EvictSingleSeparateHeapMap() {
std::scoped_lock lk{m_rebuild_lock, m_lock};
ASSERT(!m_resident_mappings.empty());
// Select the item with the lowest tick to evict.
auto* const item = std::addressof(*m_resident_mappings.begin());
auto it = m_mappings.iterator_to(*item);
// Track the map ID.
const size_t map_id = it->map_id;
// Walk backwards until we find the first entry.
while (it != m_mappings.begin()) {
// If the previous element does not have the same map ID, stop.
const auto prev = std::prev(it);
if (prev->map_id != map_id) {
break;
}
// Continue.
it = prev;
}
// Track the begin and end address.
const VAddr begin_vaddr = it->vaddr;
VAddr end_vaddr = begin_vaddr;
// Get the count iterator.
const auto count_it = m_resident_map_counts.find(map_id);
// Declare whether we have erased an underlying mapping.
bool was_erased = false;
// Unmark and merge everything in range.
while (it != m_mappings.end() && it->map_id == map_id) {
if (it->is_resident) {
// Remove from resident tree.
m_resident_mappings.erase(m_resident_mappings.iterator_to(*it));
it->is_resident = false;
// Remove reference count.
was_erased |= this->RemoveReferenceLocked(count_it, 1);
}
// Update the end address.
end_vaddr = it->vaddr + it->size;
// Advance.
it = this->MergeHeapMapForEvictLocked(it);
}
// Finally, unmap.
ASSERT(end_vaddr >= begin_vaddr);
m_buffer.Unmap(begin_vaddr, end_vaddr - begin_vaddr, false);
// Return whether we actually removed a mapping.
// This will be true if there were no holes, which is likely.
return was_erased;
}
void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
std::scoped_lock lk{m_lock};
this->SplitHeapMapLocked(offset);
this->SplitHeapMapLocked(offset + size);
}
void HeapTracker::SplitHeapMapLocked(VAddr offset) {
const auto it = this->GetNearestHeapMapLocked(offset);
if (it == m_mappings.end() || it->vaddr == offset) {
// Not contained or no split required.
return;
}
// Get the underlying item as the left.
auto* const left = std::addressof(*it);
// Cache the original size values.
const size_t size = left->size;
// Adjust the left map.
const size_t left_size = offset - left->vaddr;
left->size = left_size;
// Create the new right map.
auto* const right = new SeparateHeapMap{
.vaddr = left->vaddr + left_size,
.paddr = left->paddr + left_size,
.size = size - left_size,
.map_id = left->map_id,
.tick = left->tick,
.perm = left->perm,
.is_resident = left->is_resident,
};
// Insert the new right map.
m_mappings.insert(*right);
// If the original map was not resident, we are done.
if (!left->is_resident) {
return;
}
// Update reference count.
this->AddReferenceLocked(left->map_id, 1);
// Insert right into resident map.
m_resident_mappings.insert(*right);
}
HeapTracker::AddrTree::iterator HeapTracker::MergeHeapMapForEvictLocked(AddrTree::iterator it) {
if (it == m_mappings.end()) {
// Not contained.
return it;
}
if (it == m_mappings.begin()) {
// Nothing to merge with.
return std::next(it);
}
// Get the left and right items.
auto* const right = std::addressof(*it);
auto* const left = std::addressof(*std::prev(it));
if (left->vaddr + left->size != right->vaddr) {
// Virtual range not contiguous, cannot merge.
return std::next(it);
}
if (left->paddr + left->size != right->paddr) {
// Physical range not contiguous, cannot merge.
return std::next(it);
}
if (left->perm != right->perm) {
// Permissions mismatch, cannot merge.
return std::next(it);
}
if (left->map_id != right->map_id) {
// Map ID mismatch, cannot merge.
return std::next(it);
}
// Merge size to the left.
left->size += right->size;
// Erase the right element.
const auto next_it = m_mappings.erase(it);
// Free the right element.
delete right;
// Return the iterator to the next position.
return next_it;
}
HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
const SeparateHeapMap key{
.vaddr = offset,
};
return m_mappings.find(key);
}
void HeapTracker::AddReferenceLocked(size_t map_id, size_t inc) {
m_resident_map_counts[map_id]++;
}
bool HeapTracker::RemoveReferenceLocked(MapCountTree::iterator it, size_t dec) {
ASSERT(it != m_resident_map_counts.end());
const auto new_value = it->second -= dec;
ASSERT(new_value >= 0);
if (new_value <= 0) {
m_resident_map_counts.erase(it);
return true;
}
return false;
}
bool HeapTracker::IsEvictRequiredLocked() {
return m_resident_map_counts.size() > MaxResidentMapCount;
}
} // namespace Common

src/common/heap_tracker.h (new file, 103 lines)
View File

@@ -0,0 +1,103 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <map>
#include <mutex>
#include <shared_mutex>
#include "common/host_memory.h"
#include "common/intrusive_red_black_tree.h"
namespace Common {
struct SeparateHeapMap {
Common::IntrusiveRedBlackTreeNode addr_node{};
Common::IntrusiveRedBlackTreeNode tick_node{};
VAddr vaddr{};
PAddr paddr{};
size_t size{};
size_t map_id{};
size_t tick{};
MemoryPermission perm{};
bool is_resident{};
};
struct SeparateHeapMapAddrComparator {
static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
if (lhs.vaddr < rhs.vaddr) {
return -1;
} else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
return 0;
} else {
return 1;
}
}
};
struct SeparateHeapMapTickComparator {
static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
if (lhs.tick < rhs.tick) {
return -1;
} else if (lhs.tick > rhs.tick) {
return 1;
} else {
return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
}
}
};
class HeapTracker {
public:
explicit HeapTracker(Common::HostMemory& buffer);
~HeapTracker();
void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
bool is_separate_heap);
void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
u8* VirtualBasePointer() {
return m_buffer.VirtualBasePointer();
}
bool DeferredMapSeparateHeap(u8* fault_address);
bool DeferredMapSeparateHeap(size_t virtual_offset);
private:
Common::HostMemory& m_buffer;
std::shared_mutex m_rebuild_lock{};
std::mutex m_lock{};
size_t m_next_map_id{};
size_t m_tick{};
private:
using AddrTreeTraits =
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
using TickTreeTraits =
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
using MapCountTree = std::map<size_t, s64>;
MapCountTree m_resident_map_counts{};
AddrTree m_mappings{};
TickTree m_resident_mappings{};
private:
void SplitHeapMap(VAddr offset, size_t size);
void SplitHeapMapLocked(VAddr offset);
AddrTree::iterator MergeHeapMapForEvictLocked(AddrTree::iterator cur);
AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
bool EvictSingleSeparateHeapMap();
void AddReferenceLocked(size_t map_id, size_t inc);
bool RemoveReferenceLocked(MapCountTree::iterator map_id, size_t dec);
bool IsEvictRequiredLocked();
};
} // namespace Common
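A hypothetical driving sequence for the tracker, inferred from the implementation above (the host memory object, permission value, and offsets are placeholders, not taken from the diff):

// Sketch only: `host_memory` is an existing Common::HostMemory and `perm` a
// MemoryPermission value; offsets are arbitrary page-aligned examples.
Common::HeapTracker tracker{host_memory};

// Record a separate-heap region; per HeapTracker::Map it is mapped immediately
// and stays resident until eviction pressure (MaxResidentMapCount) removes it.
tracker.Map(/*virtual_offset=*/0x10000, /*host_offset=*/0x0, /*length=*/0x4000,
            perm, /*is_separate_heap=*/true);

// After an eviction, the next access faults and the SIGSEGV path re-maps it:
u8* const fault_address = tracker.VirtualBasePointer() + 0x10000;
const bool handled = tracker.DeferredMapSeparateHeap(fault_address);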

View File

@@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
MemoryPermission perms) {
MemoryPermission perms, bool separate_heap) {
ASSERT(virtual_offset % PageAlignment == 0);
ASSERT(host_offset % PageAlignment == 0);
ASSERT(length % PageAlignment == 0);
@@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
}
void HostMemory::Unmap(size_t virtual_offset, size_t length) {
void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
ASSERT(virtual_offset % PageAlignment == 0);
ASSERT(length % PageAlignment == 0);
ASSERT(virtual_offset + length <= virtual_size);
@@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
impl->Unmap(virtual_offset + virtual_base_offset, length);
}
void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
bool execute) {
void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
ASSERT(virtual_offset % PageAlignment == 0);
ASSERT(length % PageAlignment == 0);
ASSERT(virtual_offset + length <= virtual_size);
if (length == 0 || !virtual_base || !impl) {
return;
}
const bool read = True(perm & MemoryPermission::Read);
const bool write = True(perm & MemoryPermission::Write);
const bool execute = True(perm & MemoryPermission::Execute);
impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
}

View File

@@ -40,11 +40,12 @@ public:
HostMemory(HostMemory&& other) noexcept;
HostMemory& operator=(HostMemory&& other) noexcept;
void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms);
void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
bool separate_heap);
void Unmap(size_t virtual_offset, size_t length);
void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false);
void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
void EnableDirectMappedAddress();
@@ -64,6 +65,10 @@ public:
return virtual_base;
}
bool IsInVirtualRange(void* address) const noexcept {
return address >= virtual_base && address < virtual_base + virtual_size;
}
private:
size_t backing_size{};
size_t virtual_size{};

View File

@@ -82,16 +82,15 @@ enum class AudioEngine : u32 {
Cubeb,
Sdl2,
Null,
Oboe,
};
template <>
inline std::vector<std::pair<std::string, AudioEngine>>
EnumMetadata<AudioEngine>::Canonicalizations() {
return {
{"auto", AudioEngine::Auto},
{"cubeb", AudioEngine::Cubeb},
{"sdl2", AudioEngine::Sdl2},
{"null", AudioEngine::Null},
{"auto", AudioEngine::Auto}, {"cubeb", AudioEngine::Cubeb}, {"sdl2", AudioEngine::Sdl2},
{"null", AudioEngine::Null}, {"oboe", AudioEngine::Oboe},
};
}

View File

@@ -978,6 +978,7 @@ endif()
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
target_sources(core PRIVATE
arm/dynarmic/arm_dynarmic.cpp
arm/dynarmic/arm_dynarmic.h
arm/dynarmic/arm_dynarmic_64.cpp
arm/dynarmic/arm_dynarmic_64.h

View File

@@ -9,7 +9,7 @@
namespace Core {
void ArmInterface::LogBacktrace(const Kernel::KProcess* process) const {
void ArmInterface::LogBacktrace(Kernel::KProcess* process) const {
Kernel::Svc::ThreadContext ctx;
this->GetContext(ctx);

View File

@@ -95,7 +95,7 @@ public:
virtual void SignalInterrupt(Kernel::KThread* thread) = 0;
// Stack trace generation.
void LogBacktrace(const Kernel::KProcess* process) const;
void LogBacktrace(Kernel::KProcess* process) const;
// Debug functionality.
virtual const Kernel::DebugWatchpoint* HaltedWatchpoint() const = 0;

View File

@@ -79,7 +79,7 @@ constexpr std::array<u64, 2> SegmentBases{
0x7100000000ULL,
};
void SymbolicateBacktrace(const Kernel::KProcess* process, std::vector<BacktraceEntry>& out) {
void SymbolicateBacktrace(Kernel::KProcess* process, std::vector<BacktraceEntry>& out) {
auto modules = FindModules(process);
const bool is_64 = process->Is64Bit();
@@ -118,7 +118,7 @@ void SymbolicateBacktrace(const Kernel::KProcess* process, std::vector<Backtrace
}
}
std::vector<BacktraceEntry> GetAArch64Backtrace(const Kernel::KProcess* process,
std::vector<BacktraceEntry> GetAArch64Backtrace(Kernel::KProcess* process,
const Kernel::Svc::ThreadContext& ctx) {
std::vector<BacktraceEntry> out;
auto& memory = process->GetMemory();
@@ -144,7 +144,7 @@ std::vector<BacktraceEntry> GetAArch64Backtrace(const Kernel::KProcess* process,
return out;
}
std::vector<BacktraceEntry> GetAArch32Backtrace(const Kernel::KProcess* process,
std::vector<BacktraceEntry> GetAArch32Backtrace(Kernel::KProcess* process,
const Kernel::Svc::ThreadContext& ctx) {
std::vector<BacktraceEntry> out;
auto& memory = process->GetMemory();
@@ -173,7 +173,7 @@ std::vector<BacktraceEntry> GetAArch32Backtrace(const Kernel::KProcess* process,
} // namespace
std::optional<std::string> GetThreadName(const Kernel::KThread* thread) {
const auto* process = thread->GetOwnerProcess();
auto* process = thread->GetOwnerProcess();
if (process->Is64Bit()) {
return GetNameFromThreadType64(process->GetMemory(), *thread);
} else {
@@ -248,7 +248,7 @@ Kernel::KProcessAddress GetModuleEnd(const Kernel::KProcess* process,
return cur_addr - 1;
}
Loader::AppLoader::Modules FindModules(const Kernel::KProcess* process) {
Loader::AppLoader::Modules FindModules(Kernel::KProcess* process) {
Loader::AppLoader::Modules modules;
auto& page_table = process->GetPageTable();
@@ -312,7 +312,7 @@ Loader::AppLoader::Modules FindModules(const Kernel::KProcess* process) {
return modules;
}
Kernel::KProcessAddress FindMainModuleEntrypoint(const Kernel::KProcess* process) {
Kernel::KProcessAddress FindMainModuleEntrypoint(Kernel::KProcess* process) {
// Do we have any loaded executable sections?
auto modules = FindModules(process);
@@ -337,7 +337,7 @@ void InvalidateInstructionCacheRange(const Kernel::KProcess* process, u64 addres
}
}
std::vector<BacktraceEntry> GetBacktraceFromContext(const Kernel::KProcess* process,
std::vector<BacktraceEntry> GetBacktraceFromContext(Kernel::KProcess* process,
const Kernel::Svc::ThreadContext& ctx) {
if (process->Is64Bit()) {
return GetAArch64Backtrace(process, ctx);

View File

@@ -14,9 +14,9 @@ std::optional<std::string> GetThreadName(const Kernel::KThread* thread);
std::string_view GetThreadWaitReason(const Kernel::KThread* thread);
std::string GetThreadState(const Kernel::KThread* thread);
Loader::AppLoader::Modules FindModules(const Kernel::KProcess* process);
Loader::AppLoader::Modules FindModules(Kernel::KProcess* process);
Kernel::KProcessAddress GetModuleEnd(const Kernel::KProcess* process, Kernel::KProcessAddress base);
Kernel::KProcessAddress FindMainModuleEntrypoint(const Kernel::KProcess* process);
Kernel::KProcessAddress FindMainModuleEntrypoint(Kernel::KProcess* process);
void InvalidateInstructionCacheRange(const Kernel::KProcess* process, u64 address, u64 size);
@@ -28,7 +28,7 @@ struct BacktraceEntry {
std::string name;
};
std::vector<BacktraceEntry> GetBacktraceFromContext(const Kernel::KProcess* process,
std::vector<BacktraceEntry> GetBacktraceFromContext(Kernel::KProcess* process,
const Kernel::Svc::ThreadContext& ctx);
std::vector<BacktraceEntry> GetBacktrace(const Kernel::KThread* thread);

View File

@@ -0,0 +1,49 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#ifdef __linux__
#include "common/signal_chain.h"
#include "core/arm/dynarmic/arm_dynarmic.h"
#include "core/hle/kernel/k_process.h"
#include "core/memory.h"
namespace Core {
namespace {
thread_local Core::Memory::Memory* g_current_memory{};
std::once_flag g_registered{};
struct sigaction g_old_segv {};
void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
return;
}
return g_old_segv.sa_sigaction(sig, info, ctx);
}
} // namespace
ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
g_current_memory = std::addressof(process->GetMemory());
}
ScopedJitExecution::~ScopedJitExecution() {
g_current_memory = nullptr;
}
void ScopedJitExecution::RegisterHandler() {
std::call_once(g_registered, [] {
struct sigaction sa {};
sa.sa_sigaction = &HandleSigSegv;
sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
});
}
} // namespace Core
#endif

View File

@@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
return static_cast<HaltReason>(hr);
}
#ifdef __linux__
class ScopedJitExecution {
public:
explicit ScopedJitExecution(Kernel::KProcess* process);
~ScopedJitExecution();
static void RegisterHandler();
};
#else
class ScopedJitExecution {
public:
explicit ScopedJitExecution(Kernel::KProcess* process) {}
~ScopedJitExecution() {}
static void RegisterHandler() {}
};
#endif
} // namespace Core

View File

@@ -15,7 +15,7 @@ using namespace Common::Literals;
class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
public:
explicit DynarmicCallbacks32(ArmDynarmic32& parent, const Kernel::KProcess* process)
explicit DynarmicCallbacks32(ArmDynarmic32& parent, Kernel::KProcess* process)
: m_parent{parent}, m_memory(process->GetMemory()),
m_process(process), m_debugger_enabled{parent.m_system.DebuggerEnabled()},
m_check_memory_access{m_debugger_enabled ||
@@ -169,7 +169,7 @@ public:
ArmDynarmic32& m_parent;
Core::Memory::Memory& m_memory;
const Kernel::KProcess* m_process{};
Kernel::KProcess* m_process{};
const bool m_debugger_enabled{};
const bool m_check_memory_access{};
static constexpr u64 MinimumRunCycles = 10000U;
@@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
}
HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
ScopedJitExecution sj(thread->GetOwnerProcess());
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Run());
}
HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
ScopedJitExecution sj(thread->GetOwnerProcess());
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Step());
}
@@ -370,13 +374,14 @@ void ArmDynarmic32::RewindBreakpointInstruction() {
this->SetContext(m_breakpoint_context);
}
ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, const Kernel::KProcess* process,
ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProcess* process,
DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index)
: ArmInterface{uses_wall_clock}, m_system{system}, m_exclusive_monitor{exclusive_monitor},
m_cb(std::make_unique<DynarmicCallbacks32>(*this, process)),
m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
m_jit = MakeJit(&page_table_impl);
ScopedJitExecution::RegisterHandler();
}
ArmDynarmic32::~ArmDynarmic32() = default;

View File

@@ -20,7 +20,7 @@ class System;
class ArmDynarmic32 final : public ArmInterface {
public:
ArmDynarmic32(System& system, bool uses_wall_clock, const Kernel::KProcess* process,
ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProcess* process,
DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index);
~ArmDynarmic32() override;

View File

@@ -15,7 +15,7 @@ using namespace Common::Literals;
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
public:
explicit DynarmicCallbacks64(ArmDynarmic64& parent, const Kernel::KProcess* process)
explicit DynarmicCallbacks64(ArmDynarmic64& parent, Kernel::KProcess* process)
: m_parent{parent}, m_memory(process->GetMemory()),
m_process(process), m_debugger_enabled{parent.m_system.DebuggerEnabled()},
m_check_memory_access{m_debugger_enabled ||
@@ -216,7 +216,7 @@ public:
Core::Memory::Memory& m_memory;
u64 m_tpidrro_el0{};
u64 m_tpidr_el0{};
const Kernel::KProcess* m_process{};
Kernel::KProcess* m_process{};
const bool m_debugger_enabled{};
const bool m_check_memory_access{};
static constexpr u64 MinimumRunCycles = 10000U;
@@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
}
HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
ScopedJitExecution sj(thread->GetOwnerProcess());
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Run());
}
HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
ScopedJitExecution sj(thread->GetOwnerProcess());
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Step());
}
@@ -399,13 +403,14 @@ void ArmDynarmic64::RewindBreakpointInstruction() {
this->SetContext(m_breakpoint_context);
}
ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, const Kernel::KProcess* process,
ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProcess* process,
DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index)
: ArmInterface{uses_wall_clock}, m_system{system}, m_exclusive_monitor{exclusive_monitor},
m_cb(std::make_unique<DynarmicCallbacks64>(*this, process)), m_core_index{core_index} {
auto& page_table = process->GetPageTable().GetBasePageTable();
auto& page_table_impl = page_table.GetImpl();
m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
ScopedJitExecution::RegisterHandler();
}
ArmDynarmic64::~ArmDynarmic64() = default;

View File

@@ -25,7 +25,7 @@ class System;
class ArmDynarmic64 final : public ArmInterface {
public:
ArmDynarmic64(System& system, bool uses_wall_clock, const Kernel::KProcess* process,
ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProcess* process,
DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index);
~ArmDynarmic64() override;

View File

@@ -39,7 +39,7 @@ fpsimd_context* GetFloatingPointState(mcontext_t& host_ctx) {
}
using namespace Common::Literals;
constexpr u32 StackSize = 32_KiB;
constexpr u32 StackSize = 128_KiB;
} // namespace

View File

@@ -5,8 +5,6 @@
#include "common/bit_cast.h"
#include "core/arm/nce/interpreter_visitor.h"
#include <dynarmic/frontend/A64/decoder/a64.h>
namespace Core {
template <u32 BitSize>
@@ -249,6 +247,7 @@ bool InterpreterVisitor::LDR_lit_fpsimd(Imm<2> opc, Imm<19> imm19, Vec Vt) {
return false;
}
// Size in bytes
const u64 size = 4 << opc.ZeroExtend();
const u64 offset = imm19.SignExtend<u64>() << 2;
const u64 address = this->GetPc() + offset;
@@ -530,7 +529,7 @@ bool InterpreterVisitor::SIMDImmediate(bool wback, bool postindex, size_t scale,
}
case MemOp::Load: {
u128 data{};
m_memory.ReadBlock(address, &data, datasize);
m_memory.ReadBlock(address, &data, datasize / 8);
this->SetVec(Vt, data);
break;
}

View File

@@ -4,9 +4,15 @@
#pragma once
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include <dynarmic/frontend/A64/a64_types.h>
#include <dynarmic/frontend/A64/decoder/a64.h>
#include <dynarmic/frontend/imm.h>
#pragma GCC diagnostic pop
namespace Core {
class VisitorBase {

View File

@@ -28,7 +28,6 @@
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs_concat.h"
#include "core/file_sys/vfs_real.h"
#include "core/gpu_dirty_memory_manager.h"
#include "core/hid/hid_core.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_process.h"
@@ -130,11 +129,8 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
struct System::Impl {
explicit Impl(System& system)
: kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
cpu_manager{system}, reporter{system}, applet_manager{system}, profile_manager{},
time_manager{system}, gpu_dirty_memory_write_manager{} {
memory.SetGPUDirtyManagers(gpu_dirty_memory_write_manager);
}
: kernel{system}, fs_controller{system}, hid_core{}, room_network{}, cpu_manager{system},
reporter{system}, applet_manager{system}, profile_manager{}, time_manager{system} {}
void Initialize(System& system) {
device_memory = std::make_unique<Core::DeviceMemory>();
@@ -241,17 +237,17 @@ struct System::Impl {
debugger = std::make_unique<Debugger>(system, port);
}
SystemResultStatus SetupForApplicationProcess(System& system, Frontend::EmuWindow& emu_window) {
void InitializeKernel(System& system) {
LOG_DEBUG(Core, "initialized OK");
// Setting changes may require a full system reinitialization (e.g., disabling multicore).
ReinitializeIfNecessary(system);
memory.SetGPUDirtyManagers(gpu_dirty_memory_write_manager);
kernel.Initialize();
cpu_manager.Initialize();
}
SystemResultStatus SetupForApplicationProcess(System& system, Frontend::EmuWindow& emu_window) {
/// Reset all glue registrations
arp_manager.ResetAll();
@@ -300,17 +296,9 @@ struct System::Impl {
return SystemResultStatus::ErrorGetLoader;
}
SystemResultStatus init_result{SetupForApplicationProcess(system, emu_window)};
if (init_result != SystemResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
static_cast<int>(init_result));
ShutdownMainProcess();
return init_result;
}
InitializeKernel(system);
telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);
// Create the process.
// Create the application process.
auto main_process = Kernel::KProcess::Create(system.Kernel());
Kernel::KProcess::Register(system.Kernel(), main_process);
kernel.AppendNewProcess(main_process);
@@ -323,7 +311,18 @@ struct System::Impl {
return static_cast<SystemResultStatus>(
static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
}
// Set up the rest of the system.
SystemResultStatus init_result{SetupForApplicationProcess(system, emu_window)};
if (init_result != SystemResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
static_cast<int>(init_result));
ShutdownMainProcess();
return init_result;
}
AddGlueRegistrationForProcess(*app_loader, *main_process);
telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);
// Initialize cheat engine
if (cheat_engine) {
@@ -426,7 +425,6 @@ struct System::Impl {
cpu_manager.Shutdown();
debugger.reset();
kernel.Shutdown();
memory.Reset();
Network::RestartSocketOperations();
if (auto room_member = room_network.GetRoomMember().lock()) {
@@ -507,7 +505,6 @@ struct System::Impl {
std::unique_ptr<Tegra::Host1x::Host1x> host1x_core;
std::unique_ptr<Core::DeviceMemory> device_memory;
std::unique_ptr<AudioCore::AudioCore> audio_core;
Core::Memory::Memory memory;
Core::HID::HIDCore hid_core;
Network::RoomNetwork room_network;
@@ -567,9 +564,6 @@ struct System::Impl {
std::array<u64, Core::Hardware::NUM_CPU_CORES> dynarmic_ticks{};
std::array<MicroProfileToken, Core::Hardware::NUM_CPU_CORES> microprofile_cpu{};
std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES>
gpu_dirty_memory_write_manager{};
std::deque<std::vector<u8>> user_channel;
};
@@ -652,29 +646,12 @@ void System::PrepareReschedule(const u32 core_index) {
impl->kernel.PrepareReschedule(core_index);
}
Core::GPUDirtyMemoryManager& System::CurrentGPUDirtyMemoryManager() {
const std::size_t core = impl->kernel.GetCurrentHostThreadID();
return impl->gpu_dirty_memory_write_manager[core < Core::Hardware::NUM_CPU_CORES
? core
: Core::Hardware::NUM_CPU_CORES - 1];
}
/// Provides a constant reference to the current gou dirty memory manager.
const Core::GPUDirtyMemoryManager& System::CurrentGPUDirtyMemoryManager() const {
const std::size_t core = impl->kernel.GetCurrentHostThreadID();
return impl->gpu_dirty_memory_write_manager[core < Core::Hardware::NUM_CPU_CORES
? core
: Core::Hardware::NUM_CPU_CORES - 1];
}
size_t System::GetCurrentHostThreadID() const {
return impl->kernel.GetCurrentHostThreadID();
}
void System::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
for (auto& manager : impl->gpu_dirty_memory_write_manager) {
manager.Gather(callback);
}
return this->ApplicationProcess()->GatherGPUDirtyMemory(callback);
}
PerfStatsResults System::GetAndResetPerfStats() {
@@ -723,20 +700,12 @@ const Kernel::KProcess* System::ApplicationProcess() const {
return impl->kernel.ApplicationProcess();
}
ExclusiveMonitor& System::Monitor() {
return impl->kernel.GetExclusiveMonitor();
}
const ExclusiveMonitor& System::Monitor() const {
return impl->kernel.GetExclusiveMonitor();
}
Memory::Memory& System::ApplicationMemory() {
return impl->memory;
return impl->kernel.ApplicationProcess()->GetMemory();
}
const Core::Memory::Memory& System::ApplicationMemory() const {
return impl->memory;
return impl->kernel.ApplicationProcess()->GetMemory();
}
Tegra::GPU& System::GPU() {

View File

@@ -116,7 +116,6 @@ class CpuManager;
class Debugger;
class DeviceMemory;
class ExclusiveMonitor;
class GPUDirtyMemoryManager;
class PerfStats;
class Reporter;
class SpeedLimiter;
@@ -225,12 +224,6 @@ public:
/// Prepare the core emulation for a reschedule
void PrepareReschedule(u32 core_index);
/// Provides a reference to the gou dirty memory manager.
[[nodiscard]] Core::GPUDirtyMemoryManager& CurrentGPUDirtyMemoryManager();
/// Provides a constant reference to the current gou dirty memory manager.
[[nodiscard]] const Core::GPUDirtyMemoryManager& CurrentGPUDirtyMemoryManager() const;
void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
[[nodiscard]] size_t GetCurrentHostThreadID() const;
@@ -250,12 +243,6 @@ public:
/// Gets a const reference to the underlying CPU manager
[[nodiscard]] const CpuManager& GetCpuManager() const;
/// Gets a reference to the exclusive monitor
[[nodiscard]] ExclusiveMonitor& Monitor();
/// Gets a constant reference to the exclusive monitor
[[nodiscard]] const ExclusiveMonitor& Monitor() const;
/// Gets a mutable reference to the system memory instance.
[[nodiscard]] Core::Memory::Memory& ApplicationMemory();

View File

@@ -166,6 +166,10 @@ u32 ProgramMetadata::GetSystemResourceSize() const {
return npdm_header.system_resource_size;
}
PoolPartition ProgramMetadata::GetPoolPartition() const {
return acid_header.pool_partition;
}
const ProgramMetadata::KernelCapabilityDescriptors& ProgramMetadata::GetKernelCapabilities() const {
return aci_kernel_capabilities;
}
@@ -201,7 +205,7 @@ void ProgramMetadata::Print() const {
// Begin ACID printing (potential perms, signed)
LOG_DEBUG(Service_FS, "Magic: {:.4}", acid_header.magic.data());
LOG_DEBUG(Service_FS, "Flags: 0x{:02X}", acid_header.flags);
LOG_DEBUG(Service_FS, " > Is Retail: {}", acid_header.is_retail ? "YES" : "NO");
LOG_DEBUG(Service_FS, " > Is Retail: {}", acid_header.production_flag ? "YES" : "NO");
LOG_DEBUG(Service_FS, "Title ID Min: 0x{:016X}", acid_header.title_id_min);
LOG_DEBUG(Service_FS, "Title ID Max: 0x{:016X}", acid_header.title_id_max);
LOG_DEBUG(Service_FS, "Filesystem Access: 0x{:016X}\n", acid_file_access.permissions);

View File

@@ -34,6 +34,13 @@ enum class ProgramFilePermission : u64 {
Everything = 1ULL << 63,
};
enum class PoolPartition : u32 {
Application = 0,
Applet = 1,
System = 2,
SystemNonSecure = 3,
};
/**
* Helper which implements an interface to parse Program Description Metadata (NPDM)
* Data can either be loaded from a file path or with data and an offset into it.
@@ -72,6 +79,7 @@ public:
u64 GetTitleID() const;
u64 GetFilesystemPermissions() const;
u32 GetSystemResourceSize() const;
PoolPartition GetPoolPartition() const;
const KernelCapabilityDescriptors& GetKernelCapabilities() const;
const std::array<u8, 0x10>& GetName() const {
return npdm_header.application_name;
@@ -116,8 +124,9 @@ private:
union {
u32 flags;
BitField<0, 1, u32> is_retail;
BitField<1, 31, u32> flags_unk;
BitField<0, 1, u32> production_flag;
BitField<1, 1, u32> unqualified_approval;
BitField<2, 4, PoolPartition> pool_partition;
};
u64_le title_id_min;
u64_le title_id_max;
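The ACID flags word is now decoded into three fields: the production flag in bit 0, unqualified approval in bit 1, and the pool partition in bits 2-5. A minimal standalone sketch of that layout, using plain shifts instead of yuzu's BitField helper (DecodeAcidFlags and the struct are illustrative names only):
#include <cstdint>
enum class PoolPartition : uint32_t {
    Application = 0,
    Applet = 1,
    System = 2,
    SystemNonSecure = 3,
};
// Hypothetical decoded view of the ACID flags described above.
struct AcidFlags {
    bool production_flag;         // bit 0
    bool unqualified_approval;    // bit 1
    PoolPartition pool_partition; // bits 2-5
};
constexpr AcidFlags DecodeAcidFlags(uint32_t flags) {
    return AcidFlags{
        .production_flag = (flags & 0x1) != 0,
        .unqualified_approval = (flags & 0x2) != 0,
        .pool_partition = static_cast<PoolPartition>((flags >> 2) & 0xF),
    };
}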

View File

@@ -4,6 +4,7 @@
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
@@ -26,9 +27,9 @@ bool ReadFromUser(KernelCore& kernel, s32* out, KProcessAddress address) {
return true;
}
bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address, s32 value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
bool DecrementIfLessThan(KernelCore& kernel, s32* out, KProcessAddress address, s32 value) {
auto& monitor = GetCurrentProcess(kernel).GetExclusiveMonitor();
const auto current_core = kernel.CurrentPhysicalCoreIndex();
// NOTE: If scheduler lock is not held here, interrupt disable is required.
// KScopedInterruptDisable di;
@@ -66,10 +67,10 @@ bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address
return true;
}
bool UpdateIfEqual(Core::System& system, s32* out, KProcessAddress address, s32 value,
bool UpdateIfEqual(KernelCore& kernel, s32* out, KProcessAddress address, s32 value,
s32 new_value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
auto& monitor = GetCurrentProcess(kernel).GetExclusiveMonitor();
const auto current_core = kernel.CurrentPhysicalCoreIndex();
// NOTE: If scheduler lock is not held here, interrupt disable is required.
// KScopedInterruptDisable di;
@@ -159,7 +160,7 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32
// Check the userspace value.
s32 user_value{};
R_UNLESS(UpdateIfEqual(m_system, std::addressof(user_value), addr, value, value + 1),
R_UNLESS(UpdateIfEqual(m_kernel, std::addressof(user_value), addr, value, value + 1),
ResultInvalidCurrentMemory);
R_UNLESS(user_value == value, ResultInvalidState);
@@ -219,7 +220,7 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32
s32 user_value{};
bool succeeded{};
if (value != new_value) {
succeeded = UpdateIfEqual(m_system, std::addressof(user_value), addr, value, new_value);
succeeded = UpdateIfEqual(m_kernel, std::addressof(user_value), addr, value, new_value);
} else {
succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
}
@@ -262,7 +263,7 @@ Result KAddressArbiter::WaitIfLessThan(uint64_t addr, s32 value, bool decrement,
s32 user_value{};
bool succeeded{};
if (decrement) {
succeeded = DecrementIfLessThan(m_system, std::addressof(user_value), addr, value);
succeeded = DecrementIfLessThan(m_kernel, std::addressof(user_value), addr, value);
} else {
succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
}
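Both helpers now take the KernelCore and pull the exclusive monitor from the current process rather than a global system-wide instance. Setting the monitor machinery aside, the compare-and-update semantics of UpdateIfEqual can be sketched against a host std::atomic (illustrative only; the real code operates on guest addresses through the per-core exclusive monitor):
#include <atomic>
#include <cstdint>
// Sketch: read the word, and only store new_value while it still equals expected.
bool UpdateIfEqualSketch(std::atomic<int32_t>& word, int32_t* out,
                         int32_t expected, int32_t new_value) {
    int32_t current = word.load(std::memory_order_acquire);
    while (current == expected) {
        if (word.compare_exchange_weak(current, new_value,
                                       std::memory_order_acq_rel)) {
            break;
        }
    }
    *out = current; // the caller compares *out with expected to decide the result
    return true;    // the kernel version instead returns false on a bad address
}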

View File

@@ -58,9 +58,8 @@ Result KClientPort::CreateSession(KClientSession** out) {
KSession* session{};
// Reserve a new session from the resource limit.
//! FIXME: we are reserving this from the wrong resource limit!
KScopedResourceReservation session_reservation(
m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
KScopedResourceReservation session_reservation(GetCurrentProcessPointer(m_kernel),
LimitableResource::SessionCountMax);
R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
// Allocate a session normally.
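The reservation is now taken against the current process rather than the application process, and it follows the usual commit-or-release pattern: the scoped object gives the count back unless the caller commits it once the session is successfully created. A generic RAII sketch of that pattern (hypothetical class, not the kernel's KScopedResourceReservation):
// Sketch: reserve one unit from a counter, roll back on destruction unless committed.
class ScopedReservationSketch {
public:
    explicit ScopedReservationSketch(int& available) : available_{available} {
        if (available_ > 0) {
            --available_;
            reserved_ = true;
        }
    }
    ~ScopedReservationSketch() {
        if (reserved_ && !committed_) {
            ++available_; // creation failed or was abandoned, return the unit
        }
    }
    bool Succeeded() const { return reserved_; }
    void Commit() { committed_ = true; }

private:
    int& available_;
    bool reserved_{false};
    bool committed_{false};
};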

View File

@@ -28,10 +28,10 @@ bool WriteToUser(KernelCore& kernel, KProcessAddress address, const u32* p) {
return true;
}
bool UpdateLockAtomic(Core::System& system, u32* out, KProcessAddress address, u32 if_zero,
bool UpdateLockAtomic(KernelCore& kernel, u32* out, KProcessAddress address, u32 if_zero,
u32 new_orr_mask) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
auto& monitor = GetCurrentProcess(kernel).GetExclusiveMonitor();
const auto current_core = kernel.CurrentPhysicalCoreIndex();
u32 expected{};
@@ -208,7 +208,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
// TODO(bunnei): We should call CanAccessAtomic(..) here.
can_access = true;
if (can_access) [[likely]] {
UpdateLockAtomic(m_system, std::addressof(prev_tag), address, own_tag,
UpdateLockAtomic(m_kernel, std::addressof(prev_tag), address, own_tag,
Svc::HandleWaitMask);
}
}

View File

@@ -30,7 +30,7 @@ public:
public:
explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {}
Result Initialize(s32 size) {
Result Initialize(KProcess* owner, s32 size) {
// Check that the table size is valid.
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
@@ -44,6 +44,7 @@ public:
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;
m_owner = owner;
// Free all entries.
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
@@ -90,8 +91,8 @@ public:
// Handle pseudo-handles.
if constexpr (std::derived_from<KProcess, T>) {
if (handle == Svc::PseudoHandle::CurrentProcess) {
//! FIXME: this is the wrong process!
auto* const cur_process = m_kernel.ApplicationProcess();
// TODO: this should be the current process
auto* const cur_process = m_owner;
ASSERT(cur_process != nullptr);
return cur_process;
}
@@ -301,6 +302,7 @@ private:
private:
KernelCore& m_kernel;
KProcess* m_owner{};
std::array<EntryInfo, MaxTableSize> m_entry_infos{};
std::array<KAutoObject*, MaxTableSize> m_objects{};
mutable KSpinLock m_lock;
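Recording an owner at Initialize time lets the CurrentProcess pseudo-handle resolve to the table's own process instead of the global application process. A condensed sketch of that lookup path (the types and helper are placeholders; only the pseudo-handle value mirrors HOS):
#include <cstdint>
struct Process;
struct HandleTableSketch {
    Process* owner{};
    static constexpr uint32_t PseudoHandleCurrentProcess = 0xFFFF8001;

    Process* GetProcess(uint32_t handle) const {
        if (handle == PseudoHandleCurrentProcess) {
            return owner; // previously resolved to the application process
        }
        return LookupRealHandle(handle);
    }

    Process* LookupRealHandle(uint32_t) const {
        return nullptr; // real table lookup omitted in this sketch
    }
};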

View File

@@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
void KPageTableBase::Finalize() {
auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
if (Settings::IsFastmemEnabled()) {
m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
}
};
@@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
// Unmap.
R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
cur_pages, 0, false, unmap_properties,
OperationType::Unmap, true));
OperationType::UnmapPhysical, true));
}
// Check if we're done.
@@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
// Map the pages.
R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
cur_pg, map_properties,
OperationType::MapFirstGroup, false));
OperationType::MapFirstGroupPhysical, false));
}
}
@@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)
// Unmap.
R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
unmap_properties, OperationType::Unmap, false));
unmap_properties, OperationType::UnmapPhysical, false));
}
// Check if we're done.
@@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
// or free them to the page list, and so it goes unused (along with page properties).
switch (operation) {
case OperationType::Unmap: {
case OperationType::Unmap:
case OperationType::UnmapPhysical: {
const bool separate_heap = operation == OperationType::UnmapPhysical;
// Ensure that any pages we track are closed on exit.
KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
@@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
this->MakePageGroup(pages_to_close, virt_addr, num_pages);
// Unmap.
m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);
R_SUCCEED();
}
@@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
ASSERT(virt_addr != 0);
ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
ConvertToMemoryPermission(properties.perm));
ConvertToMemoryPermission(properties.perm), false);
// Open references to pages, if we should.
if (this->IsHeapPhysicalAddress(phys_addr)) {
@@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
switch (operation) {
case OperationType::MapGroup:
case OperationType::MapFirstGroup: {
case OperationType::MapFirstGroup:
case OperationType::MapFirstGroupPhysical: {
const bool separate_heap = operation == OperationType::MapFirstGroupPhysical;
// We want to maintain a new reference to every page in the group.
KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);
for (const auto& node : page_group) {
const size_t size{node.GetNumPages() * PageSize};
// Map the pages.
m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
ConvertToMemoryPermission(properties.perm));
ConvertToMemoryPermission(properties.perm), separate_heap);
virt_addr += size;
}
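The new UnmapPhysical and MapFirstGroupPhysical operation types share the existing unmap/map paths and only differ by a separate_heap flag forwarded to the memory backend. The dispatch shape, reduced to a standalone sketch (Op and the *Sketch functions are illustrative):
enum class Op { Unmap, UnmapPhysical, MapGroup, MapFirstGroup, MapFirstGroupPhysical };
void UnmapRegionSketch(bool separate_heap) { /* backend unmap */ }
void MapRegionSketch(bool separate_heap) { /* backend map */ }
void OperateSketch(Op op) {
    switch (op) {
    case Op::Unmap:
    case Op::UnmapPhysical:
        UnmapRegionSketch(/*separate_heap=*/op == Op::UnmapPhysical);
        break;
    case Op::MapGroup:
    case Op::MapFirstGroup:
    case Op::MapFirstGroupPhysical:
        MapRegionSketch(/*separate_heap=*/op == Op::MapFirstGroupPhysical);
        break;
    }
}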

View File

@@ -104,6 +104,9 @@ protected:
ChangePermissionsAndRefresh = 5,
ChangePermissionsAndRefreshAndFlush = 6,
Separate = 7,
MapFirstGroupPhysical = 65000,
UnmapPhysical = 65001,
};
static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;

View File

@@ -306,12 +306,16 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
params.code_address, params.code_num_pages * PageSize,
m_system_resource, res_limit, this->GetMemory(), 0));
m_system_resource, res_limit, m_memory, 0));
}
ON_RESULT_FAILURE_2 {
m_page_table.Finalize();
};
// Ensure our memory is initialized.
m_memory.SetCurrentPageTable(*this);
m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
// Ensure we can insert the code region.
R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
KMemoryState::Code),
@@ -399,12 +403,16 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool,
params.code_address, code_size, m_system_resource, res_limit,
this->GetMemory(), aslr_space_start));
m_memory, aslr_space_start));
}
ON_RESULT_FAILURE_2 {
m_page_table.Finalize();
};
// Ensure our memory is initialized.
m_memory.SetCurrentPageTable(*this);
m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
// Ensure we can insert the code region.
R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
ResultInvalidMemoryRegion);
@@ -1094,8 +1102,7 @@ void KProcess::UnpinThread(KThread* thread) {
Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids,
s32 max_out_count) {
// TODO: use current memory reference
auto& memory = m_kernel.System().ApplicationMemory();
auto& memory = this->GetMemory();
// Lock the list.
KScopedLightLock lk(m_list_lock);
@@ -1128,14 +1135,15 @@ void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
KProcess::KProcess(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
m_handle_table{kernel} {}
m_handle_table{kernel}, m_dirty_memory_managers{},
m_exclusive_monitor{}, m_memory{kernel.System()} {}
KProcess::~KProcess() = default;
Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
KProcessAddress aslr_space_start, bool is_hbl) {
// Create a resource limit for the process.
const auto physical_memory_size =
m_kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
const auto pool = static_cast<KMemoryManager::Pool>(metadata.GetPoolPartition());
const auto physical_memory_size = m_kernel.MemoryManager().GetSize(pool);
auto* res_limit =
Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
@@ -1146,8 +1154,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
Svc::CreateProcessFlag flag{};
u64 code_address{};
// We are an application.
flag |= Svc::CreateProcessFlag::IsApplication;
// Determine if we are an application.
if (pool == KMemoryManager::Pool::Application) {
flag |= Svc::CreateProcessFlag::IsApplication;
}
// If we are 64-bit, create as such.
if (metadata.Is64BitProgram()) {
@@ -1196,8 +1206,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
std::memcpy(params.name.data(), name.data(), sizeof(params.name));
// Initialize for application process.
R_TRY(this->Initialize(params, metadata.GetKernelCapabilities(), res_limit,
KMemoryManager::Pool::Application, aslr_space_start));
R_TRY(this->Initialize(params, metadata.GetKernelCapabilities(), res_limit, pool,
aslr_space_start));
// Assign remaining properties.
m_is_hbl = is_hbl;
@@ -1223,22 +1233,25 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
#ifdef HAS_NCE
if (Settings::IsNceEnabled()) {
if (this->IsApplication() && Settings::IsNceEnabled()) {
auto& buffer = m_kernel.System().DeviceMemory().buffer;
const auto& code = code_set.CodeSegment();
const auto& patch = code_set.PatchSegment();
buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
buffer.Protect(GetInteger(base_addr + code.addr), code.size,
Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
buffer.Protect(GetInteger(base_addr + patch.addr), patch.size,
Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
}
#endif
}
void KProcess::InitializeInterfaces() {
this->GetMemory().SetCurrentPageTable(*this);
m_exclusive_monitor =
Core::MakeExclusiveMonitor(this->GetMemory(), Core::Hardware::NUM_CPU_CORES);
#ifdef HAS_NCE
if (this->Is64Bit() && Settings::IsNceEnabled()) {
if (this->IsApplication() && Settings::IsNceEnabled()) {
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
m_arm_interfaces[i] = std::make_unique<Core::ArmNce>(m_kernel.System(), true, i);
}
@@ -1248,13 +1261,13 @@ void KProcess::InitializeInterfaces() {
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
m_arm_interfaces[i] = std::make_unique<Core::ArmDynarmic64>(
m_kernel.System(), m_kernel.IsMulticore(), this,
static_cast<Core::DynarmicExclusiveMonitor&>(m_kernel.GetExclusiveMonitor()), i);
static_cast<Core::DynarmicExclusiveMonitor&>(*m_exclusive_monitor), i);
}
} else {
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
m_arm_interfaces[i] = std::make_unique<Core::ArmDynarmic32>(
m_kernel.System(), m_kernel.IsMulticore(), this,
static_cast<Core::DynarmicExclusiveMonitor&>(m_kernel.GetExclusiveMonitor()), i);
static_cast<Core::DynarmicExclusiveMonitor&>(*m_exclusive_monitor), i);
}
}
}
@@ -1305,9 +1318,10 @@ bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointT
return true;
}
Core::Memory::Memory& KProcess::GetMemory() const {
// TODO: per-process memory
return m_kernel.System().ApplicationMemory();
void KProcess::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
for (auto& manager : m_dirty_memory_managers) {
manager.Gather(callback);
}
}
} // namespace Kernel
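Each process now owns its memory instance, exclusive monitor, and a per-core set of GPU dirty-memory managers, and GatherGPUDirtyMemory simply walks those managers. A trimmed sketch of that shape (DirtyManagerSketch stands in for Core::GPUDirtyMemoryManager; one manager per host core is an assumption carried over from the diff):
#include <array>
#include <cstddef>
#include <cstdint>
#include <functional>
struct DirtyManagerSketch {
    void Gather(std::function<void(uint64_t, size_t)>& cb) {
        // The real manager reports each dirty (address, size) range through cb.
    }
};
constexpr size_t NumCoresSketch = 4;
struct ProcessSketch {
    std::array<DirtyManagerSketch, NumCoresSketch> dirty_managers{};

    void GatherGPUDirtyMemory(std::function<void(uint64_t, size_t)>& callback) {
        for (auto& manager : dirty_managers) {
            manager.Gather(callback);
        }
    }
};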

View File

@@ -7,6 +7,7 @@
#include "core/arm/arm_interface.h"
#include "core/file_sys/program_metadata.h"
#include "core/gpu_dirty_memory_manager.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_capabilities.h"
@@ -17,6 +18,7 @@
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/memory.h"
namespace Kernel {
@@ -126,6 +128,9 @@ private:
#ifdef HAS_NCE
std::unordered_map<u64, u64> m_post_handlers{};
#endif
std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES> m_dirty_memory_managers;
std::unique_ptr<Core::ExclusiveMonitor> m_exclusive_monitor;
Core::Memory::Memory m_memory;
private:
Result StartTermination();
@@ -502,7 +507,15 @@ public:
void InitializeInterfaces();
Core::Memory::Memory& GetMemory() const;
Core::Memory::Memory& GetMemory() {
return m_memory;
}
void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
Core::ExclusiveMonitor& GetExclusiveMonitor() const {
return *m_exclusive_monitor;
}
public:
// Overridden parent functions.
@@ -539,7 +552,7 @@ private:
Result InitializeHandleTable(s32 size) {
// Try to initialize the handle table.
R_TRY(m_handle_table.Initialize(size));
R_TRY(m_handle_table.Initialize(this, size));
// We succeeded, so note that we did.
m_is_handle_table_initialized = true;

File diff suppressed because it is too large

View File

@@ -49,14 +49,21 @@ public:
bool IsSignaled() const override;
void OnClientClosed();
/// TODO: flesh these out to match the real kernel
Result OnRequest(KSessionRequest* request);
Result SendReply(bool is_hle = false);
Result ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context = nullptr,
Result SendReply(uintptr_t server_message, uintptr_t server_buffer_size,
KPhysicalAddress server_message_paddr, bool is_hle = false);
Result ReceiveRequest(uintptr_t server_message, uintptr_t server_buffer_size,
KPhysicalAddress server_message_paddr,
std::shared_ptr<Service::HLERequestContext>* out_context = nullptr,
std::weak_ptr<Service::SessionRequestManager> manager = {});
Result SendReplyHLE() {
return SendReply(true);
R_RETURN(this->SendReply(0, 0, 0, true));
}
Result ReceiveRequestHLE(std::shared_ptr<Service::HLERequestContext>* out_context,
std::weak_ptr<Service::SessionRequestManager> manager) {
R_RETURN(this->ReceiveRequest(0, 0, 0, out_context, manager));
}
private:

View File

@@ -33,8 +33,7 @@ void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
m_name = name;
// Set our owner process.
//! FIXME: this is the wrong process!
m_process = m_kernel.ApplicationProcess();
m_process = GetCurrentProcessPointer(m_kernel);
m_process->Open();
// Set our port.

View File

@@ -1422,8 +1422,7 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
}
Core::Memory::Memory& GetCurrentMemory(KernelCore& kernel) {
// TODO: per-process memory
return kernel.System().ApplicationMemory();
return GetCurrentProcess(kernel).GetMemory();
}
KScopedDisableDispatch::~KScopedDisableDispatch() {

View File

@@ -314,11 +314,7 @@ public:
m_current_core_id = core;
}
KProcess* GetOwnerProcess() {
return m_parent;
}
const KProcess* GetOwnerProcess() const {
KProcess* GetOwnerProcess() const {
return m_parent;
}

View File

@@ -68,8 +68,6 @@ struct KernelCore::Impl {
global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel);
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
global_handle_table->Initialize(KHandleTable::MaxTableSize);
is_phantom_mode_for_singlecore = false;
@@ -121,13 +119,8 @@ struct KernelCore::Impl {
next_user_process_id = KProcess::ProcessIdMin;
next_thread_id = 1;
global_handle_table->Finalize();
global_handle_table.reset();
preemption_event = nullptr;
exclusive_monitor.reset();
// Cleanup persistent kernel objects
auto CleanupObject = [](KAutoObject* obj) {
if (obj) {
@@ -191,8 +184,6 @@ struct KernelCore::Impl {
}
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.ApplicationMemory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
const s32 core{static_cast<s32>(i)};
@@ -791,10 +782,6 @@ struct KernelCore::Impl {
std::shared_ptr<Core::Timing::EventType> preemption_event;
// This is the kernel's handle table or supervisor handle table which
// stores all the objects in place.
std::unique_ptr<KHandleTable> global_handle_table;
std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
std::unique_ptr<KObjectNameGlobalData> object_name_global_data;
@@ -805,7 +792,6 @@ struct KernelCore::Impl {
std::mutex server_lock;
std::vector<std::unique_ptr<Service::ServerManager>> server_managers;
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;
// Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
@@ -882,10 +868,6 @@ KResourceLimit* KernelCore::GetSystemResourceLimit() {
return impl->system_resource_limit;
}
KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
return impl->global_handle_table->GetObject<KThread>(handle);
}
void KernelCore::AppendNewProcess(KProcess* process) {
impl->process_list.push_back(process);
}
@@ -959,14 +941,6 @@ Kernel::KHardwareTimer& KernelCore::HardwareTimer() {
return *impl->hardware_timer;
}
Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() {
return *impl->exclusive_monitor;
}
const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
return *impl->exclusive_monitor;
}
KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
return *impl->global_object_list_container;
}
@@ -1030,14 +1004,6 @@ u64 KernelCore::CreateNewUserProcessID() {
return impl->next_user_process_id++;
}
KHandleTable& KernelCore::GlobalHandleTable() {
return *impl->global_handle_table;
}
const KHandleTable& KernelCore::GlobalHandleTable() const {
return *impl->global_handle_table;
}
void KernelCore::RegisterCoreThread(std::size_t core_id) {
impl->RegisterCoreThread(core_id);
}

View File

@@ -116,9 +116,6 @@ public:
/// Retrieves a shared pointer to the system resource limit instance.
KResourceLimit* GetSystemResourceLimit();
/// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
KScopedAutoObject<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
/// Adds the given shared pointer to an internal list of active processes.
void AppendNewProcess(KProcess* process);
@@ -170,10 +167,6 @@ public:
/// Stops execution of 'id' core, in order to reschedule a new thread.
void PrepareReschedule(std::size_t id);
Core::ExclusiveMonitor& GetExclusiveMonitor();
const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
KAutoObjectWithListContainer& ObjectListContainer();
const KAutoObjectWithListContainer& ObjectListContainer() const;

View File

@@ -18,13 +18,13 @@ public:
static constexpr inline u64 NullTag = 0;
public:
enum class ReceiveListCountType : u32 {
None = 0,
ToMessageBuffer = 1,
ToSingleBuffer = 2,
enum ReceiveListCountType : u32 {
ReceiveListCountType_None = 0,
ReceiveListCountType_ToMessageBuffer = 1,
ReceiveListCountType_ToSingleBuffer = 2,
CountOffset = 2,
CountMax = 13,
ReceiveListCountType_CountOffset = 2,
ReceiveListCountType_CountMax = 13,
};
private:
@@ -591,16 +591,16 @@ public:
// Add the size of the receive list.
const auto count = hdr.GetReceiveListCount();
switch (count) {
case MessageHeader::ReceiveListCountType::None:
case MessageHeader::ReceiveListCountType_None:
break;
case MessageHeader::ReceiveListCountType::ToMessageBuffer:
case MessageHeader::ReceiveListCountType_ToMessageBuffer:
break;
case MessageHeader::ReceiveListCountType::ToSingleBuffer:
case MessageHeader::ReceiveListCountType_ToSingleBuffer:
msg_size += ReceiveListEntry::GetDataSize();
break;
default:
msg_size += (static_cast<s32>(count) -
static_cast<s32>(MessageHeader::ReceiveListCountType::CountOffset)) *
static_cast<s32>(MessageHeader::ReceiveListCountType_CountOffset)) *
ReceiveListEntry::GetDataSize();
break;
}

View File

@@ -118,7 +118,6 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
R_SUCCEED();
case InfoType::IsApplication:
LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
*result = process->IsApplication();
R_SUCCEED();

View File

@@ -48,8 +48,7 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
};
// Send the reply.
R_TRY(session->SendReply());
// R_TRY(session->SendReply(message, buffer_size, message_paddr));
R_TRY(session->SendReply(message, buffer_size, message_paddr));
}
// Receive a message.
@@ -85,8 +84,7 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
if (R_SUCCEEDED(result)) {
KServerSession* session = objs[index]->DynamicCast<KServerSession*>();
if (session != nullptr) {
// result = session->ReceiveRequest(message, buffer_size, message_paddr);
result = session->ReceiveRequest();
result = session->ReceiveRequest(message, buffer_size, message_paddr);
if (ResultNotFound == result) {
continue;
}

View File

@@ -38,7 +38,9 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
constexpr Result ResultReceiveListBroken{ErrorModule::Kernel, 258};
constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259};
constexpr Result ResultMessageTooLarge{ErrorModule::Kernel, 260};
constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
} // namespace Kernel

View File

@@ -359,7 +359,7 @@ private:
void GetActiveChannelCount(HLERequestContext& ctx) {
const auto& sink{system.AudioCore().GetOutputSink()};
u32 channel_count{sink.GetDeviceChannels()};
u32 channel_count{sink.GetSystemChannels()};
LOG_DEBUG(Service_Audio, "(STUBBED) called. Channels={}", channel_count);

View File

@@ -89,7 +89,7 @@ static void GenerateErrorReport(Core::System& system, Result error_code, const F
crash_report += fmt::format(" ESR: {:016x}\n", info.esr);
crash_report += fmt::format(" FAR: {:016x}\n", info.far);
crash_report += "\nBacktrace:\n";
for (size_t i = 0; i < info.backtrace_size; i++) {
for (u32 i = 0; i < std::min<u32>(info.backtrace_size, 32); i++) {
crash_report +=
fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]);
}

View File

@@ -54,6 +54,13 @@ enum class ImageDirectoryId : u32 {
SdCard,
};
enum class OpenDirectoryMode : u64 {
Directory = (1 << 0),
File = (1 << 1),
All = Directory | File
};
DECLARE_ENUM_FLAG_OPERATORS(OpenDirectoryMode);
class FileSystemController {
public:
explicit FileSystemController(Core::System& system_);
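OpenDirectoryMode is a bit-flag enum: Directory and File can be combined, and IDirectory below only indexes the entry kinds whose bits are set. A minimal flag-test sketch using plain casts instead of the project's DECLARE_ENUM_FLAG_OPERATORS/True helpers:
#include <cstdint>
enum class OpenDirectoryModeSketch : uint64_t {
    Directory = 1ULL << 0,
    File = 1ULL << 1,
    All = Directory | File,
};
constexpr bool HasMode(OpenDirectoryModeSketch value, OpenDirectoryModeSketch bit) {
    return (static_cast<uint64_t>(value) & static_cast<uint64_t>(bit)) != 0;
}
// e.g. only build the directory part of the entry index when requested:
// if (HasMode(mode, OpenDirectoryModeSketch::Directory)) { /* add subdirectories */ }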

View File

@@ -259,7 +259,7 @@ static void BuildEntryIndex(std::vector<FileSys::Entry>& entries, const std::vec
class IDirectory final : public ServiceFramework<IDirectory> {
public:
explicit IDirectory(Core::System& system_, FileSys::VirtualDir backend_)
explicit IDirectory(Core::System& system_, FileSys::VirtualDir backend_, OpenDirectoryMode mode)
: ServiceFramework{system_, "IDirectory"}, backend(std::move(backend_)) {
static const FunctionInfo functions[] = {
{0, &IDirectory::Read, "Read"},
@@ -269,8 +269,12 @@ public:
// TODO(DarkLordZach): Verify that this is the correct behavior.
// Build entry index now to save time later.
BuildEntryIndex(entries, backend->GetFiles(), FileSys::EntryType::File);
BuildEntryIndex(entries, backend->GetSubdirectories(), FileSys::EntryType::Directory);
if (True(mode & OpenDirectoryMode::Directory)) {
BuildEntryIndex(entries, backend->GetSubdirectories(), FileSys::EntryType::Directory);
}
if (True(mode & OpenDirectoryMode::File)) {
BuildEntryIndex(entries, backend->GetFiles(), FileSys::EntryType::File);
}
}
private:
@@ -446,11 +450,9 @@ public:
const auto file_buffer = ctx.ReadBuffer();
const std::string name = Common::StringFromBuffer(file_buffer);
const auto mode = rp.PopRaw<OpenDirectoryMode>();
// TODO(Subv): Implement this filter.
const u32 filter_flags = rp.Pop<u32>();
LOG_DEBUG(Service_FS, "called. directory={}, filter={}", name, filter_flags);
LOG_DEBUG(Service_FS, "called. directory={}, mode={}", name, mode);
FileSys::VirtualDir vfs_dir{};
auto result = backend.OpenDirectory(&vfs_dir, name);
@@ -460,7 +462,7 @@ public:
return;
}
auto directory = std::make_shared<IDirectory>(system, vfs_dir);
auto directory = std::make_shared<IDirectory>(system, vfs_dir, mode);
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(ResultSuccess);

View File

@@ -51,7 +51,7 @@ private:
IPC::RequestParser rp{ctx};
const auto vibration_device_handle{rp.PopRaw<Core::HID::VibrationDeviceHandle>()};
if (resource_manager != nullptr) {
if (resource_manager != nullptr && resource_manager->GetNpad()) {
resource_manager->GetNpad()->InitializeVibrationDevice(vibration_device_handle);
}

View File

@@ -151,8 +151,8 @@ public:
if (manager->IsDomain()) {
context->AddDomainObject(std::move(iface));
} else {
kernel.ApplicationProcess()->GetResourceLimit()->Reserve(
Kernel::LimitableResource::SessionCountMax, 1);
ASSERT(Kernel::GetCurrentProcess(kernel).GetResourceLimit()->Reserve(
Kernel::LimitableResource::SessionCountMax, 1));
auto* session = Kernel::KSession::Create(kernel);
session->Initialize(nullptr, 0);

View File

@@ -90,6 +90,18 @@ Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
LOG_DEBUG(Service_Nvnflinger, "acquiring slot={}", slot);
// If the front buffer is still being tracked, update its slot state
if (core->StillTracking(*front)) {
slots[slot].acquire_called = true;
slots[slot].needs_cleanup_on_release = false;
slots[slot].buffer_state = BufferState::Acquired;
// TODO: for now, avoid resetting the fence, so that when we next return this
// slot to the producer, it will wait for the fence to pass. We should fix this
// by properly waiting for the fence in the BufferItemConsumer.
// slots[slot].fence = Fence::NoFence();
}
// If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr to
// avoid unnecessarily remapping this buffer on the consumer side.
if (out_buffer->acquire_called) {
@@ -132,11 +144,28 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
++current;
}
slots[slot].buffer_state = BufferState::Free;
if (slots[slot].buffer_state == BufferState::Acquired) {
// TODO: for now, avoid resetting the fence, so that when we next return this
// slot to the producer, it can wait for its own fence to pass. We should fix this
// by properly waiting for the fence in the BufferItemConsumer.
// slots[slot].fence = release_fence;
slots[slot].buffer_state = BufferState::Free;
listener = core->connected_producer_listener;
listener = core->connected_producer_listener;
LOG_DEBUG(Service_Nvnflinger, "releasing slot {}", slot);
LOG_DEBUG(Service_Nvnflinger, "releasing slot {}", slot);
} else if (slots[slot].needs_cleanup_on_release) {
LOG_DEBUG(Service_Nvnflinger, "releasing a stale buffer slot {} (state = {})", slot,
slots[slot].buffer_state);
slots[slot].needs_cleanup_on_release = false;
return Status::StaleBufferSlot;
} else {
LOG_ERROR(Service_Nvnflinger,
"attempted to release buffer slot {} but its state was {}", slot,
slots[slot].buffer_state);
return Status::BadValue;
}
core->SignalDequeueCondition();
}
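Acquire now marks the slot Acquired (and clears needs_cleanup_on_release), and Release only frees slots that are genuinely Acquired, reporting stale or invalid slots otherwise. A compact sketch of that release-side state check (placeholder types mirroring the shape of the change):
enum class BufferStateSketch { Free, Dequeued, Queued, Acquired };
enum class StatusSketch { NoError, StaleBufferSlot, BadValue };
struct SlotSketch {
    BufferStateSketch buffer_state{BufferStateSketch::Free};
    bool needs_cleanup_on_release{false};
};
StatusSketch ReleaseSketch(SlotSketch& slot) {
    if (slot.buffer_state == BufferStateSketch::Acquired) {
        slot.buffer_state = BufferStateSketch::Free;
        return StatusSketch::NoError;
    }
    if (slot.needs_cleanup_on_release) {
        slot.needs_cleanup_on_release = false;
        return StatusSketch::StaleBufferSlot; // slot was freed while still acquired
    }
    return StatusSketch::BadValue; // releasing a slot that was never acquired
}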

View File

@@ -74,6 +74,10 @@ void BufferQueueCore::FreeBufferLocked(s32 slot) {
slots[slot].graphic_buffer.reset();
if (slots[slot].buffer_state == BufferState::Acquired) {
slots[slot].needs_cleanup_on_release = true;
}
slots[slot].buffer_state = BufferState::Free;
slots[slot].frame_number = UINT32_MAX;
slots[slot].acquire_called = false;

View File

@@ -31,6 +31,7 @@ struct BufferSlot final {
u64 frame_number{};
Fence fence;
bool acquire_called{};
bool needs_cleanup_on_release{};
bool attached_by_consumer{};
bool is_preallocated{};
};

View File

@@ -47,7 +47,7 @@ ServerManager::~ServerManager() {
m_stopped.Wait();
m_threads.clear();
// Clean up ports.
// Clean up server ports.
for (const auto& [port, handler] : m_ports) {
port->Close();
}
@@ -97,22 +97,15 @@ Result ServerManager::RegisterNamedService(const std::string& service_name,
u32 max_sessions) {
ASSERT(m_sessions.size() + m_ports.size() < MaximumWaitObjects);
// Add the new server to sm:.
ASSERT(R_SUCCEEDED(
m_system.ServiceManager().RegisterService(service_name, max_sessions, handler_factory)));
// Get the registered port.
Kernel::KPort* port{};
ASSERT(
R_SUCCEEDED(m_system.ServiceManager().GetServicePort(std::addressof(port), service_name)));
// Open a new reference to the server port.
port->GetServerPort().Open();
// Add the new server to sm: and get the moved server port.
Kernel::KServerPort* server_port{};
R_ASSERT(m_system.ServiceManager().RegisterService(std::addressof(server_port), service_name,
max_sessions, handler_factory));
// Begin tracking the server port.
{
std::scoped_lock ll{m_list_mutex};
m_ports.emplace(std::addressof(port->GetServerPort()), std::move(handler_factory));
m_ports.emplace(server_port, std::move(handler_factory));
}
// Signal the wakeup event.
@@ -372,7 +365,7 @@ Result ServerManager::OnSessionEvent(Kernel::KServerSession* session,
// Try to receive a message.
std::shared_ptr<HLERequestContext> context;
rc = session->ReceiveRequest(&context, manager);
rc = session->ReceiveRequestHLE(&context, manager);
// If the session has been closed, we're done.
if (rc == Kernel::ResultSessionClosed) {

View File

@@ -507,6 +507,14 @@ void SET_SYS::SetTvSettings(HLERequestContext& ctx) {
rb.Push(ResultSuccess);
}
void SET_SYS::GetDebugModeFlag(HLERequestContext& ctx) {
LOG_DEBUG(Service_SET, "called");
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(ResultSuccess);
rb.Push<u32>(0);
}
void SET_SYS::GetQuestFlag(HLERequestContext& ctx) {
LOG_WARNING(Service_SET, "(STUBBED) called");
@@ -926,7 +934,7 @@ SET_SYS::SET_SYS(Core::System& system_) : ServiceFramework{system_, "set:sys"},
{59, &SET_SYS::SetNetworkSystemClockContext, "SetNetworkSystemClockContext"},
{60, &SET_SYS::IsUserSystemClockAutomaticCorrectionEnabled, "IsUserSystemClockAutomaticCorrectionEnabled"},
{61, &SET_SYS::SetUserSystemClockAutomaticCorrectionEnabled, "SetUserSystemClockAutomaticCorrectionEnabled"},
{62, nullptr, "GetDebugModeFlag"},
{62, &SET_SYS::GetDebugModeFlag, "GetDebugModeFlag"},
{63, &SET_SYS::GetPrimaryAlbumStorage, "GetPrimaryAlbumStorage"},
{64, nullptr, "SetPrimaryAlbumStorage"},
{65, nullptr, "GetUsb30EnableFlag"},
@@ -1143,6 +1151,8 @@ void SET_SYS::StoreSettings() {
}
void SET_SYS::StoreSettingsThreadFunc(std::stop_token stop_token) {
Common::SetCurrentThreadName("SettingsStore");
while (Common::StoppableTimedWait(stop_token, std::chrono::minutes(1))) {
std::scoped_lock l{m_save_needed_mutex};
if (!std::exchange(m_save_needed, false)) {

View File

@@ -98,6 +98,7 @@ private:
void GetSettingsItemValue(HLERequestContext& ctx);
void GetTvSettings(HLERequestContext& ctx);
void SetTvSettings(HLERequestContext& ctx);
void GetDebugModeFlag(HLERequestContext& ctx);
void GetQuestFlag(HLERequestContext& ctx);
void GetDeviceTimeZoneLocationName(HLERequestContext& ctx);
void SetDeviceTimeZoneLocationName(HLERequestContext& ctx);

View File

@@ -29,8 +29,7 @@ ServiceManager::ServiceManager(Kernel::KernelCore& kernel_) : kernel{kernel_} {
ServiceManager::~ServiceManager() {
for (auto& [name, port] : service_ports) {
port->GetClientPort().Close();
port->GetServerPort().Close();
port->Close();
}
if (deferral_event) {
@@ -50,8 +49,8 @@ static Result ValidateServiceName(const std::string& name) {
return ResultSuccess;
}
Result ServiceManager::RegisterService(std::string name, u32 max_sessions,
SessionRequestHandlerFactory handler) {
Result ServiceManager::RegisterService(Kernel::KServerPort** out_server_port, std::string name,
u32 max_sessions, SessionRequestHandlerFactory handler) {
R_TRY(ValidateServiceName(name));
std::scoped_lock lk{lock};
@@ -66,13 +65,17 @@ Result ServiceManager::RegisterService(std::string name, u32 max_sessions,
// Register the port.
Kernel::KPort::Register(kernel, port);
service_ports.emplace(name, port);
service_ports.emplace(name, std::addressof(port->GetClientPort()));
registered_services.emplace(name, handler);
if (deferral_event) {
deferral_event->Signal();
}
return ResultSuccess;
// Set our output.
*out_server_port = std::addressof(port->GetServerPort());
// We succeeded.
R_SUCCEED();
}
Result ServiceManager::UnregisterService(const std::string& name) {
@@ -91,7 +94,8 @@ Result ServiceManager::UnregisterService(const std::string& name) {
return ResultSuccess;
}
Result ServiceManager::GetServicePort(Kernel::KPort** out_port, const std::string& name) {
Result ServiceManager::GetServicePort(Kernel::KClientPort** out_client_port,
const std::string& name) {
R_TRY(ValidateServiceName(name));
std::scoped_lock lk{lock};
@@ -101,7 +105,7 @@ Result ServiceManager::GetServicePort(Kernel::KPort** out_port, const std::strin
return Service::SM::ResultNotRegistered;
}
*out_port = it->second;
*out_client_port = it->second;
return ResultSuccess;
}
@@ -172,8 +176,8 @@ Result SM::GetServiceImpl(Kernel::KClientSession** out_client_session, HLEReques
std::string name(PopServiceName(rp));
// Find the named port.
Kernel::KPort* port{};
auto port_result = service_manager.GetServicePort(&port, name);
Kernel::KClientPort* client_port{};
auto port_result = service_manager.GetServicePort(&client_port, name);
if (port_result == Service::SM::ResultInvalidServiceName) {
LOG_ERROR(Service_SM, "Invalid service name '{}'", name);
return Service::SM::ResultInvalidServiceName;
@@ -187,7 +191,7 @@ Result SM::GetServiceImpl(Kernel::KClientSession** out_client_session, HLEReques
// Create a new session.
Kernel::KClientSession* session{};
if (const auto result = port->GetClientPort().CreateSession(&session); result.IsError()) {
if (const auto result = client_port->CreateSession(&session); result.IsError()) {
LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw);
return result;
}
@@ -221,7 +225,9 @@ void SM::RegisterServiceImpl(HLERequestContext& ctx, std::string name, u32 max_s
LOG_DEBUG(Service_SM, "called with name={}, max_session_count={}, is_light={}", name,
max_session_count, is_light);
if (const auto result = service_manager.RegisterService(name, max_session_count, nullptr);
Kernel::KServerPort* server_port{};
if (const auto result = service_manager.RegisterService(std::addressof(server_port), name,
max_session_count, nullptr);
result.IsError()) {
LOG_ERROR(Service_SM, "failed to register service with error_code={:08X}", result.raw);
IPC::ResponseBuilder rb{ctx, 2};
@@ -229,13 +235,9 @@ void SM::RegisterServiceImpl(HLERequestContext& ctx, std::string name, u32 max_s
return;
}
auto* port = Kernel::KPort::Create(kernel);
port->Initialize(ServerSessionCountMax, is_light, 0);
SCOPE_EXIT({ port->GetClientPort().Close(); });
IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
rb.Push(ResultSuccess);
rb.PushMoveObjects(port->GetServerPort());
rb.PushMoveObjects(server_port);
}
void SM::UnregisterService(HLERequestContext& ctx) {

View File

@@ -56,10 +56,10 @@ public:
explicit ServiceManager(Kernel::KernelCore& kernel_);
~ServiceManager();
Result RegisterService(std::string name, u32 max_sessions,
SessionRequestHandlerFactory handler_factory);
Result RegisterService(Kernel::KServerPort** out_server_port, std::string name,
u32 max_sessions, SessionRequestHandlerFactory handler_factory);
Result UnregisterService(const std::string& name);
Result GetServicePort(Kernel::KPort** out_port, const std::string& name);
Result GetServicePort(Kernel::KClientPort** out_client_port, const std::string& name);
template <Common::DerivedFrom<SessionRequestHandler> T>
std::shared_ptr<T> GetService(const std::string& service_name) const {
@@ -84,7 +84,7 @@ private:
/// Map of registered services, retrieved using GetServicePort.
std::mutex lock;
std::unordered_map<std::string, SessionRequestHandlerFactory> registered_services;
std::unordered_map<std::string, Kernel::KPort*> service_ports;
std::unordered_map<std::string, Kernel::KClientPort*> service_ports;
/// Kernel context
Kernel::KernelCore& kernel;
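RegisterService now returns the newly created server port through an out parameter and keeps only the client port in its name map, so callers such as ServerManager no longer look the port up and reopen it afterwards. A thumbnail of that out-parameter shape (placeholder types, not the yuzu classes):
struct ClientPortSketch {};
struct ServerPortSketch {};
struct PortSketch {
    ClientPortSketch client;
    ServerPortSketch server;
};
bool RegisterServiceSketch(ServerPortSketch** out_server_port, PortSketch& port) {
    // ... validate the name, register the port, remember &port.client by name ...
    *out_server_port = &port.server; // the caller starts tracking this directly
    return true;
}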

View File

@@ -28,7 +28,6 @@ void Controller::ConvertCurrentObjectToDomain(HLERequestContext& ctx) {
void Controller::CloneCurrentObject(HLERequestContext& ctx) {
LOG_DEBUG(Service, "called");
auto& process = *ctx.GetThread().GetOwnerProcess();
auto session_manager = ctx.GetManager();
// FIXME: this is duplicated from the SVC, it should just call it instead
@@ -36,11 +35,11 @@ void Controller::CloneCurrentObject(HLERequestContext& ctx) {
// Reserve a new session from the process resource limit.
Kernel::KScopedResourceReservation session_reservation(
&process, Kernel::LimitableResource::SessionCountMax);
Kernel::GetCurrentProcessPointer(kernel), Kernel::LimitableResource::SessionCountMax);
ASSERT(session_reservation.Succeeded());
// Create the session.
Kernel::KSession* session = Kernel::KSession::Create(system.Kernel());
Kernel::KSession* session = Kernel::KSession::Create(kernel);
ASSERT(session != nullptr);
// Initialize the session.
@@ -50,7 +49,7 @@ void Controller::CloneCurrentObject(HLERequestContext& ctx) {
session_reservation.Commit();
// Register the session.
Kernel::KSession::Register(system.Kernel(), session);
Kernel::KSession::Register(kernel, session);
// Register with server manager.
session_manager->GetServerManager().RegisterSession(&session->GetServerSession(),

View File

@@ -129,9 +129,10 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
}
metadata.Print();
// Enable NCE only for programs with 39-bit address space.
// Enable NCE only for applications with 39-bit address space.
const bool is_39bit =
metadata.GetAddressSpaceType() == FileSys::ProgramAddressSpaceType::Is39Bit;
const bool is_application = metadata.GetPoolPartition() == FileSys::PoolPartition::Application;
Settings::SetNceEnabled(is_39bit);
const std::array static_modules = {"rtld", "main", "subsdk0", "subsdk1", "subsdk2",
@@ -147,7 +148,7 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
const auto GetPatcher = [&](size_t i) -> Core::NCE::Patcher* {
#ifdef HAS_NCE
if (Settings::IsNceEnabled()) {
if (is_application && Settings::IsNceEnabled()) {
return &module_patchers[i];
}
#endif
@@ -175,7 +176,7 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect
// Enable direct memory mapping in case of NCE.
const u64 fastmem_base = [&]() -> size_t {
if (Settings::IsNceEnabled()) {
if (is_application && Settings::IsNceEnabled()) {
auto& buffer = system.DeviceMemory().buffer;
buffer.EnableDirectMappedAddress();
return reinterpret_cast<u64>(buffer.VirtualBasePointer());

View File

@@ -10,6 +10,7 @@
#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/common_types.h"
#include "common/heap_tracker.h"
#include "common/logging/log.h"
#include "common/page_table.h"
#include "common/scope_exit.h"
@@ -45,11 +46,25 @@ struct Memory::Impl {
void SetCurrentPageTable(Kernel::KProcess& process) {
current_page_table = &process.GetPageTable().GetImpl();
current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
if (std::addressof(process) == system.ApplicationProcess() &&
Settings::IsFastmemEnabled()) {
current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
} else {
current_page_table->fastmem_arena = nullptr;
}
#ifdef __linux__
heap_tracker.emplace(system.DeviceMemory().buffer);
buffer = std::addressof(*heap_tracker);
#else
buffer = std::addressof(system.DeviceMemory().buffer);
#endif
}
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
Common::PhysicalAddress target, Common::MemoryPermission perms) {
Common::PhysicalAddress target, Common::MemoryPermission perms,
bool separate_heap) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -57,20 +72,21 @@ struct Memory::Impl {
MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
Common::PageType::Memory);
if (Settings::IsFastmemEnabled()) {
system.DeviceMemory().buffer.Map(GetInteger(base),
GetInteger(target) - DramMemoryMap::Base, size, perms);
if (current_page_table->fastmem_arena) {
buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
separate_heap);
}
}
void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
bool separate_heap) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
Common::PageType::Unmapped);
if (Settings::IsFastmemEnabled()) {
system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
if (current_page_table->fastmem_arena) {
buffer->Unmap(GetInteger(base), size, separate_heap);
}
}
@@ -79,17 +95,7 @@ struct Memory::Impl {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((vaddr & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr);
if (!Settings::IsFastmemEnabled()) {
return;
}
const bool is_r = True(perms & Common::MemoryPermission::Read);
const bool is_w = True(perms & Common::MemoryPermission::Write);
const bool is_x =
True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
if (!current_page_table) {
system.DeviceMemory().buffer.Protect(vaddr, size, is_r, is_w, is_x);
if (!current_page_table->fastmem_arena) {
return;
}
@@ -101,8 +107,7 @@ struct Memory::Impl {
switch (page_type) {
case Common::PageType::RasterizerCachedMemory:
if (protect_bytes > 0) {
system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
is_x);
buffer->Protect(protect_begin, protect_bytes, perms);
protect_bytes = 0;
}
break;
@@ -115,7 +120,7 @@ struct Memory::Impl {
}
if (protect_bytes > 0) {
system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
buffer->Protect(protect_begin, protect_bytes, perms);
}
}
@@ -239,7 +244,7 @@ struct Memory::Impl {
bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
auto on_memory, auto on_rasterizer, auto increment) {
const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl();
const auto& page_table = *current_page_table;
std::size_t remaining_size = size;
std::size_t page_index = addr >> YUZU_PAGEBITS;
std::size_t page_offset = addr & YUZU_PAGEMASK;
@@ -484,8 +489,10 @@ struct Memory::Impl {
return;
}
if (Settings::IsFastmemEnabled()) {
system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug);
if (current_page_table->fastmem_arena) {
const auto perm{debug ? Common::MemoryPermission{}
: Common::MemoryPermission::ReadWrite};
buffer->Protect(vaddr, size, perm);
}
// Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -541,10 +548,15 @@ struct Memory::Impl {
return;
}
if (Settings::IsFastmemEnabled()) {
const bool is_read_enable =
!Settings::values.use_reactive_flushing.GetValue() || !cached;
system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
if (current_page_table->fastmem_arena) {
Common::MemoryPermission perm{};
if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
perm |= Common::MemoryPermission::Read;
}
if (!cached) {
perm |= Common::MemoryPermission::Write;
}
buffer->Protect(vaddr, size, perm);
}
// Iterate over a contiguous CPU address space, which corresponds to the specified GPU
@@ -718,6 +730,17 @@ struct Memory::Impl {
GetInteger(vaddr), []() {}, []() {});
}
void FixPageProtection(u64 vaddr) {
vaddr = Common::AlignDown(vaddr, YUZU_PAGESIZE);
if (!AddressSpaceContains(*current_page_table, vaddr, 1)) [[unlikely]] {
return;
}
ProtectRegion(*current_page_table, vaddr, YUZU_PAGESIZE,
Common::MemoryPermission::ReadWrite);
}
/**
* Reads a particular data type out of memory at the given virtual address.
*
@@ -855,6 +878,13 @@ struct Memory::Impl {
std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
std::mutex sys_core_guard;
std::optional<Common::HeapTracker> heap_tracker;
#ifdef __linux__
Common::HeapTracker* buffer{};
#else
Common::HostMemory* buffer{};
#endif
};
Memory::Memory(Core::System& system_) : system{system_} {
@@ -872,12 +902,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
}
void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
Common::PhysicalAddress target, Common::MemoryPermission perms) {
impl->MapMemoryRegion(page_table, base, size, target, perms);
Common::PhysicalAddress target, Common::MemoryPermission perms,
bool separate_heap) {
impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
}
void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
impl->UnmapRegion(page_table, base, size);
void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
bool separate_heap) {
impl->UnmapRegion(page_table, base, size, separate_heap);
}
void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -886,8 +918,7 @@ void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress
}
bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
const Kernel::KProcess& process = *system.ApplicationProcess();
const auto& page_table = process.GetPageTable().GetImpl();
const auto& page_table = *impl->current_page_table;
const size_t page = vaddr >> YUZU_PAGEBITS;
if (page >= page_table.pointers.size()) {
return false;
@@ -1048,7 +1079,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
}
bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
bool mapped = true;
[[maybe_unused]] bool mapped = true;
[[maybe_unused]] bool rasterizer = false;
u8* const ptr = impl->GetPointerImpl(
GetInteger(vaddr),
[&] {
@@ -1056,8 +1089,31 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
GetInteger(vaddr));
mapped = false;
},
[&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
return mapped && ptr != nullptr;
[&] {
impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
rasterizer = true;
});
const bool mapping_exists = mapped && ptr != nullptr;
#ifdef __linux__
if (mapping_exists && !rasterizer) {
if (!impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr))) {
// GPU may have raced reprotecting this page, try to fix it.
impl->FixPageProtection(GetInteger(vaddr));
}
}
#endif
return mapping_exists;
}
bool Memory::InvalidateSeparateHeap(void* fault_address) {
#ifdef __linux__
return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
#else
return false;
#endif
}
} // namespace Core::Memory
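SetCurrentPageTable now enables the fastmem arena only for the application process and, on Linux, routes host accesses through a heap-tracker wrapper instead of the raw host buffer. A small sketch of that backend selection (the *Sketch types stand in for Common::HostMemory and Common::HeapTracker):
#include <optional>
struct HostMemorySketch {};
struct HeapTrackerSketch {
    explicit HeapTrackerSketch(HostMemorySketch&) {}
};
struct MemoryImplSketch {
    HostMemorySketch host_buffer;
#ifdef __linux__
    std::optional<HeapTrackerSketch> heap_tracker;
    HeapTrackerSketch* buffer{};
#else
    HostMemorySketch* buffer{};
#endif

    void SelectBackend() {
#ifdef __linux__
        heap_tracker.emplace(host_buffer);
        buffer = &*heap_tracker;
#else
        buffer = &host_buffer;
#endif
    }
};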

View File

@@ -86,7 +86,8 @@ public:
* @param perms The permissions to map the memory with.
*/
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
Common::PhysicalAddress target, Common::MemoryPermission perms);
Common::PhysicalAddress target, Common::MemoryPermission perms,
bool separate_heap);
/**
* Unmaps a region of the emulated process address space.
@@ -95,7 +96,8 @@ public:
* @param base The address to begin unmapping at.
* @param size The amount of bytes to unmap.
*/
void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
bool separate_heap);
/**
* Protects a region of the emulated process address space with the new permissions.
@@ -486,6 +488,7 @@ public:
void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
bool InvalidateSeparateHeap(void* fault_address);
void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
private:

View File

@@ -214,16 +214,16 @@ Id TextureImage(EmitContext& ctx, IR::TextureInstInfo info, const IR::Value& ind
}
}
Id Image(EmitContext& ctx, const IR::Value& index, IR::TextureInstInfo info) {
std::pair<Id, bool> Image(EmitContext& ctx, const IR::Value& index, IR::TextureInstInfo info) {
if (!index.IsImmediate() || index.U32() != 0) {
throw NotImplementedException("Indirect image indexing");
}
if (info.type == TextureType::Buffer) {
const ImageBufferDefinition def{ctx.image_buffers.at(info.descriptor_index)};
return ctx.OpLoad(def.image_type, def.id);
return {ctx.OpLoad(def.image_type, def.id), def.is_integer};
} else {
const ImageDefinition def{ctx.images.at(info.descriptor_index)};
return ctx.OpLoad(def.image_type, def.id);
return {ctx.OpLoad(def.image_type, def.id), def.is_integer};
}
}
@@ -566,13 +566,23 @@ Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id co
LOG_WARNING(Shader_SPIRV, "Typeless image read not supported by host");
return ctx.ConstantNull(ctx.U32[4]);
}
return Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst, ctx.U32[4],
Image(ctx, index, info), coords, std::nullopt, std::span<const Id>{});
const auto [image, is_integer] = Image(ctx, index, info);
const Id result_type{is_integer ? ctx.U32[4] : ctx.F32[4]};
Id color{Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst,
result_type, image, coords, std::nullopt, std::span<const Id>{})};
if (!is_integer) {
color = ctx.OpBitcast(ctx.U32[4], color);
}
return color;
}
void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id color) {
const auto info{inst->Flags<IR::TextureInstInfo>()};
ctx.OpImageWrite(Image(ctx, index, info), coords, color);
const auto [image, is_integer] = Image(ctx, index, info);
if (!is_integer) {
color = ctx.OpBitcast(ctx.F32[4], color);
}
ctx.OpImageWrite(image, coords, color);
}
Id EmitIsTextureScaled(EmitContext& ctx, const IR::Value& index) {
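Image loads and stores now pick the result/operand type from the descriptor's is_integer flag and bitcast when the image has a float sampled type, so the rest of the IR can keep treating texel data as u32x4. What that u32<->f32 reinterpretation means, shown on host values rather than through the SPIR-V emitter (illustrative only):
#include <array>
#include <bit>
#include <cstddef>
#include <cstdint>
std::array<uint32_t, 4> BitcastTexelSketch(const std::array<float, 4>& color) {
    std::array<uint32_t, 4> out{};
    for (size_t i = 0; i < 4; ++i) {
        out[i] = std::bit_cast<uint32_t>(color[i]); // same bits, reinterpreted type
    }
    return out;
}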

View File

@@ -74,20 +74,19 @@ spv::ImageFormat GetImageFormat(ImageFormat format) {
throw InvalidArgument("Invalid image format {}", format);
}
Id ImageType(EmitContext& ctx, const ImageDescriptor& desc) {
Id ImageType(EmitContext& ctx, const ImageDescriptor& desc, Id sampled_type) {
const spv::ImageFormat format{GetImageFormat(desc.format)};
const Id type{ctx.U32[1]};
switch (desc.type) {
case TextureType::Color1D:
return ctx.TypeImage(type, spv::Dim::Dim1D, false, false, false, 2, format);
return ctx.TypeImage(sampled_type, spv::Dim::Dim1D, false, false, false, 2, format);
case TextureType::ColorArray1D:
return ctx.TypeImage(type, spv::Dim::Dim1D, false, true, false, 2, format);
return ctx.TypeImage(sampled_type, spv::Dim::Dim1D, false, true, false, 2, format);
case TextureType::Color2D:
return ctx.TypeImage(type, spv::Dim::Dim2D, false, false, false, 2, format);
return ctx.TypeImage(sampled_type, spv::Dim::Dim2D, false, false, false, 2, format);
case TextureType::ColorArray2D:
return ctx.TypeImage(type, spv::Dim::Dim2D, false, true, false, 2, format);
return ctx.TypeImage(sampled_type, spv::Dim::Dim2D, false, true, false, 2, format);
case TextureType::Color3D:
return ctx.TypeImage(type, spv::Dim::Dim3D, false, false, false, 2, format);
return ctx.TypeImage(sampled_type, spv::Dim::Dim3D, false, false, false, 2, format);
case TextureType::Buffer:
throw NotImplementedException("Image buffer");
default:
@@ -1273,7 +1272,9 @@ void EmitContext::DefineImageBuffers(const Info& info, u32& binding) {
throw NotImplementedException("Array of image buffers");
}
const spv::ImageFormat format{GetImageFormat(desc.format)};
const Id image_type{TypeImage(U32[1], spv::Dim::Buffer, false, false, false, 2, format)};
const Id sampled_type{desc.is_integer ? U32[1] : F32[1]};
const Id image_type{
TypeImage(sampled_type, spv::Dim::Buffer, false, false, false, 2, format)};
const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant)};
Decorate(id, spv::Decoration::Binding, binding);
@@ -1283,6 +1284,7 @@ void EmitContext::DefineImageBuffers(const Info& info, u32& binding) {
.id = id,
.image_type = image_type,
.count = desc.count,
.is_integer = desc.is_integer,
});
if (profile.supported_spirv >= 0x00010400) {
interfaces.push_back(id);
@@ -1327,7 +1329,8 @@ void EmitContext::DefineImages(const Info& info, u32& binding, u32& scaling_inde
if (desc.count != 1) {
throw NotImplementedException("Array of images");
}
const Id image_type{ImageType(*this, desc)};
const Id sampled_type{desc.is_integer ? U32[1] : F32[1]};
const Id image_type{ImageType(*this, desc, sampled_type)};
const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant)};
Decorate(id, spv::Decoration::Binding, binding);
@@ -1337,6 +1340,7 @@ void EmitContext::DefineImages(const Info& info, u32& binding, u32& scaling_inde
.id = id,
.image_type = image_type,
.count = desc.count,
.is_integer = desc.is_integer,
});
if (profile.supported_spirv >= 0x00010400) {
interfaces.push_back(id);

View File

@@ -47,12 +47,14 @@ struct ImageBufferDefinition {
Id id;
Id image_type;
u32 count;
bool is_integer;
};
struct ImageDefinition {
Id id;
Id image_type;
u32 count;
bool is_integer;
};
struct UniformDefinitions {

View File

@@ -24,6 +24,8 @@ public:
[[nodiscard]] virtual TexturePixelFormat ReadTexturePixelFormat(u32 raw_handle) = 0;
[[nodiscard]] virtual bool IsTexturePixelFormatInteger(u32 raw_handle) = 0;
[[nodiscard]] virtual u32 ReadViewportTransformState() = 0;
[[nodiscard]] virtual u32 TextureBoundBuffer() const = 0;

View File

@@ -815,6 +815,15 @@ bool FindGradient3DDerivatives(std::array<IR::Value, 3>& results, IR::Value coor
return true;
}
void ConvertDerivatives(std::array<IR::Value, 3>& results, IR::IREmitter& ir) {
for (size_t i = 0; i < 3; i++) {
if (results[i].Type() == IR::Type::U32) {
results[i] = results[i].IsImmediate() ? ir.Imm32(Common::BitCast<f32>(results[i].U32()))
: ir.BitCast<IR::F32>(IR::U32(results[i]));
}
}
}
void FoldImageSampleImplicitLod(IR::Block& block, IR::Inst& inst) {
IR::TextureInstInfo info = inst.Flags<IR::TextureInstInfo>();
auto orig_opcode = inst.GetOpcode();
@@ -831,12 +840,14 @@ void FoldImageSampleImplicitLod(IR::Block& block, IR::Inst& inst) {
if (!offset.IsImmediate()) {
return;
}
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
IR::Inst* const inst2 = coords.InstRecursive();
std::array<std::array<IR::Value, 3>, 3> results_matrix;
for (size_t i = 0; i < 3; i++) {
if (!FindGradient3DDerivatives(results_matrix[i], inst2->Arg(i).Resolve())) {
return;
}
ConvertDerivatives(results_matrix[i], ir);
}
IR::F32 lod_clamp{};
if (info.has_lod_clamp != 0) {
@@ -846,7 +857,6 @@ void FoldImageSampleImplicitLod(IR::Block& block, IR::Inst& inst) {
lod_clamp = IR::F32{bias_lc};
}
}
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
IR::Value new_coords =
ir.CompositeConstruct(results_matrix[0][0], results_matrix[1][0], results_matrix[2][0]);
IR::Value derivatives_1 = ir.CompositeConstruct(results_matrix[0][1], results_matrix[0][2],

View File

@@ -372,6 +372,10 @@ TexturePixelFormat ReadTexturePixelFormat(Environment& env, const ConstBufferAdd
return env.ReadTexturePixelFormat(GetTextureHandle(env, cbuf));
}
bool IsTexturePixelFormatInteger(Environment& env, const ConstBufferAddr& cbuf) {
return env.IsTexturePixelFormatInteger(GetTextureHandle(env, cbuf));
}
class Descriptors {
public:
explicit Descriptors(TextureBufferDescriptors& texture_buffer_descriptors_,
@@ -403,6 +407,7 @@ public:
})};
image_buffer_descriptors[index].is_written |= desc.is_written;
image_buffer_descriptors[index].is_read |= desc.is_read;
image_buffer_descriptors[index].is_integer |= desc.is_integer;
return index;
}
@@ -432,6 +437,7 @@ public:
})};
image_descriptors[index].is_written |= desc.is_written;
image_descriptors[index].is_read |= desc.is_read;
image_descriptors[index].is_integer |= desc.is_integer;
return index;
}
@@ -469,6 +475,20 @@ void PatchImageSampleImplicitLod(IR::Block& block, IR::Inst& inst) {
ir.FPRecip(ir.ConvertUToF(32, 32, ir.CompositeExtract(texture_size, 1))))));
}
bool IsPixelFormatSNorm(TexturePixelFormat pixel_format) {
switch (pixel_format) {
case TexturePixelFormat::A8B8G8R8_SNORM:
case TexturePixelFormat::R8G8_SNORM:
case TexturePixelFormat::R8_SNORM:
case TexturePixelFormat::R16G16B16A16_SNORM:
case TexturePixelFormat::R16G16_SNORM:
case TexturePixelFormat::R16_SNORM:
return true;
default:
return false;
}
}
void PatchTexelFetch(IR::Block& block, IR::Inst& inst, TexturePixelFormat pixel_format) {
const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
@@ -587,11 +607,13 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
}
const bool is_written{inst->GetOpcode() != IR::Opcode::ImageRead};
const bool is_read{inst->GetOpcode() != IR::Opcode::ImageWrite};
const bool is_integer{IsTexturePixelFormatInteger(env, cbuf)};
if (flags.type == TextureType::Buffer) {
index = descriptors.Add(ImageBufferDescriptor{
.format = flags.image_format,
.is_written = is_written,
.is_read = is_read,
.is_integer = is_integer,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,
.count = cbuf.count,
@@ -603,6 +625,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
.format = flags.image_format,
.is_written = is_written,
.is_read = is_read,
.is_integer = is_integer,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,
.count = cbuf.count,
@@ -658,7 +681,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
if (!host_info.support_snorm_render_buffer && inst->GetOpcode() == IR::Opcode::ImageFetch &&
flags.type == TextureType::Buffer) {
const auto pixel_format = ReadTexturePixelFormat(env, cbuf);
if (pixel_format != TexturePixelFormat::OTHER) {
if (IsPixelFormatSNorm(pixel_format)) {
PatchTexelFetch(*texture_inst.block, *texture_inst.inst, pixel_format);
}
}
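
For context, a minimal sketch of the standard 8-bit signed-normalized decode rule (the usual GL/Vulkan convention, not code from this change): it is this mapping that the listed SNORM formats rely on, and why fetches from them need special handling when the host cannot use a snorm render buffer directly.

#include <algorithm>
#include <cstdint>

// Map a signed byte to [-1, 1]: -127..127 scales linearly, -128 clamps to -1.
constexpr float DecodeSnorm8(std::int8_t value) {
    return std::max(static_cast<float>(value) / 127.0f, -1.0f);
}

static_assert(DecodeSnorm8(127) == 1.0f);
static_assert(DecodeSnorm8(-128) == -1.0f);
static_assert(DecodeSnorm8(0) == 0.0f);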

View File

@@ -35,14 +35,109 @@ enum class TextureType : u32 {
};
constexpr u32 NUM_TEXTURE_TYPES = 9;
enum class TexturePixelFormat : u32 {
enum class TexturePixelFormat {
A8B8G8R8_UNORM,
A8B8G8R8_SNORM,
A8B8G8R8_SINT,
A8B8G8R8_UINT,
R5G6B5_UNORM,
B5G6R5_UNORM,
A1R5G5B5_UNORM,
A2B10G10R10_UNORM,
A2B10G10R10_UINT,
A2R10G10B10_UNORM,
A1B5G5R5_UNORM,
A5B5G5R1_UNORM,
R8_UNORM,
R8_SNORM,
R8G8_SNORM,
R8_SINT,
R8_UINT,
R16G16B16A16_FLOAT,
R16G16B16A16_UNORM,
R16G16B16A16_SNORM,
R16G16_SNORM,
R16G16B16A16_SINT,
R16G16B16A16_UINT,
B10G11R11_FLOAT,
R32G32B32A32_UINT,
BC1_RGBA_UNORM,
BC2_UNORM,
BC3_UNORM,
BC4_UNORM,
BC4_SNORM,
BC5_UNORM,
BC5_SNORM,
BC7_UNORM,
BC6H_UFLOAT,
BC6H_SFLOAT,
ASTC_2D_4X4_UNORM,
B8G8R8A8_UNORM,
R32G32B32A32_FLOAT,
R32G32B32A32_SINT,
R32G32_FLOAT,
R32G32_SINT,
R32_FLOAT,
R16_FLOAT,
R16_UNORM,
R16_SNORM,
OTHER
R16_UINT,
R16_SINT,
R16G16_UNORM,
R16G16_FLOAT,
R16G16_UINT,
R16G16_SINT,
R16G16_SNORM,
R32G32B32_FLOAT,
A8B8G8R8_SRGB,
R8G8_UNORM,
R8G8_SNORM,
R8G8_SINT,
R8G8_UINT,
R32G32_UINT,
R16G16B16X16_FLOAT,
R32_UINT,
R32_SINT,
ASTC_2D_8X8_UNORM,
ASTC_2D_8X5_UNORM,
ASTC_2D_5X4_UNORM,
B8G8R8A8_SRGB,
BC1_RGBA_SRGB,
BC2_SRGB,
BC3_SRGB,
BC7_SRGB,
A4B4G4R4_UNORM,
G4R4_UNORM,
ASTC_2D_4X4_SRGB,
ASTC_2D_8X8_SRGB,
ASTC_2D_8X5_SRGB,
ASTC_2D_5X4_SRGB,
ASTC_2D_5X5_UNORM,
ASTC_2D_5X5_SRGB,
ASTC_2D_10X8_UNORM,
ASTC_2D_10X8_SRGB,
ASTC_2D_6X6_UNORM,
ASTC_2D_6X6_SRGB,
ASTC_2D_10X6_UNORM,
ASTC_2D_10X6_SRGB,
ASTC_2D_10X5_UNORM,
ASTC_2D_10X5_SRGB,
ASTC_2D_10X10_UNORM,
ASTC_2D_10X10_SRGB,
ASTC_2D_12X10_UNORM,
ASTC_2D_12X10_SRGB,
ASTC_2D_12X12_UNORM,
ASTC_2D_12X12_SRGB,
ASTC_2D_8X6_UNORM,
ASTC_2D_8X6_SRGB,
ASTC_2D_6X5_UNORM,
ASTC_2D_6X5_SRGB,
E5B9G9R9_FLOAT,
D32_FLOAT,
D16_UNORM,
X8_D24_UNORM,
S8_UINT,
D24_UNORM_S8_UINT,
S8_UINT_D24_UNORM,
D32_FLOAT_S8_UINT,
};
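
A hypothetical classification helper (illustration only; the real lookup goes through Environment::IsTexturePixelFormatInteger) showing which kind of entries above end up flagged as integer: the *_SINT and *_UINT formats, which must be bound as integer-sampled images.

// Representative subset only; the remaining *_SINT / *_UINT entries would be
// listed the same way. Hypothetical helper, not part of the change.
constexpr bool IsIntegerPixelFormat(TexturePixelFormat format) {
    switch (format) {
    case TexturePixelFormat::A8B8G8R8_SINT:
    case TexturePixelFormat::A8B8G8R8_UINT:
    case TexturePixelFormat::R8_SINT:
    case TexturePixelFormat::R8_UINT:
    case TexturePixelFormat::R16G16B16A16_SINT:
    case TexturePixelFormat::R16G16B16A16_UINT:
    case TexturePixelFormat::R32G32B32A32_UINT:
    case TexturePixelFormat::R32G32B32A32_SINT:
    case TexturePixelFormat::R32_UINT:
    case TexturePixelFormat::R32_SINT:
        return true;
    default:
        return false;
    }
}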
enum class ImageFormat : u32 {
@@ -97,6 +192,7 @@ struct ImageBufferDescriptor {
ImageFormat format;
bool is_written;
bool is_read;
bool is_integer;
u32 cbuf_index;
u32 cbuf_offset;
u32 count;
@@ -129,6 +225,7 @@ struct ImageDescriptor {
ImageFormat format;
bool is_written;
bool is_read;
bool is_integer;
u32 cbuf_index;
u32 cbuf_offset;
u32 count;

View File

@@ -12,6 +12,7 @@ using namespace Common::Literals;
static constexpr size_t VIRTUAL_SIZE = 1ULL << 39;
static constexpr size_t BACKING_SIZE = 4_GiB;
static constexpr auto PERMS = Common::MemoryPermission::ReadWrite;
static constexpr auto HEAP = false;
TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
{ HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); }
@@ -20,7 +21,7 @@ TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
TEST_CASE("HostMemory: Simple map", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x5000, 0x8000, 0x1000, PERMS);
mem.Map(0x5000, 0x8000, 0x1000, PERMS, HEAP);
volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
data[0] = 50;
@@ -29,8 +30,8 @@ TEST_CASE("HostMemory: Simple map", "[common]") {
TEST_CASE("HostMemory: Simple mirror map", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x5000, 0x3000, 0x2000, PERMS);
mem.Map(0x8000, 0x4000, 0x1000, PERMS);
mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
mem.Map(0x8000, 0x4000, 0x1000, PERMS, HEAP);
volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000;
volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000;
@@ -40,116 +41,116 @@ TEST_CASE("HostMemory: Simple mirror map", "[common]") {
TEST_CASE("HostMemory: Simple unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x5000, 0x3000, 0x2000, PERMS);
mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
data[75] = 50;
REQUIRE(data[75] == 50);
mem.Unmap(0x5000, 0x2000);
mem.Unmap(0x5000, 0x2000, HEAP);
}
TEST_CASE("HostMemory: Simple unmap and remap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x5000, 0x3000, 0x2000, PERMS);
mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
data[0] = 50;
REQUIRE(data[0] == 50);
mem.Unmap(0x5000, 0x2000);
mem.Unmap(0x5000, 0x2000, HEAP);
mem.Map(0x5000, 0x3000, 0x2000, PERMS);
mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
REQUIRE(data[0] == 50);
mem.Map(0x7000, 0x2000, 0x5000, PERMS);
mem.Map(0x7000, 0x2000, 0x5000, PERMS, HEAP);
REQUIRE(data[0x3000] == 50);
}
TEST_CASE("HostMemory: Nieche allocation", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x0000, 0, 0x20000, PERMS);
mem.Unmap(0x0000, 0x4000);
mem.Map(0x1000, 0, 0x2000, PERMS);
mem.Map(0x3000, 0, 0x1000, PERMS);
mem.Map(0, 0, 0x1000, PERMS);
mem.Map(0x0000, 0, 0x20000, PERMS, HEAP);
mem.Unmap(0x0000, 0x4000, HEAP);
mem.Map(0x1000, 0, 0x2000, PERMS, HEAP);
mem.Map(0x3000, 0, 0x1000, PERMS, HEAP);
mem.Map(0, 0, 0x1000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Full unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x8000, 0, 0x4000, PERMS);
mem.Unmap(0x8000, 0x4000);
mem.Map(0x6000, 0, 0x16000, PERMS);
mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
mem.Unmap(0x8000, 0x4000, HEAP);
mem.Map(0x6000, 0, 0x16000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x0000, 0, 0x4000, PERMS);
mem.Unmap(0x2000, 0x4000);
mem.Map(0x2000, 0x80000, 0x4000, PERMS);
mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
mem.Unmap(0x2000, 0x4000, HEAP);
mem.Map(0x2000, 0x80000, 0x4000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x8000, 0, 0x4000, PERMS);
mem.Unmap(0x6000, 0x4000);
mem.Map(0x8000, 0, 0x2000, PERMS);
mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
mem.Unmap(0x6000, 0x4000, HEAP);
mem.Map(0x8000, 0, 0x2000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x0000, 0, 0x4000, PERMS);
mem.Map(0x4000, 0, 0x1b000, PERMS);
mem.Unmap(0x3000, 0x1c000);
mem.Map(0x3000, 0, 0x20000, PERMS);
mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
mem.Map(0x4000, 0, 0x1b000, PERMS, HEAP);
mem.Unmap(0x3000, 0x1c000, HEAP);
mem.Map(0x3000, 0, 0x20000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Unmap between placeholders", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x0000, 0, 0x4000, PERMS);
mem.Map(0x4000, 0, 0x4000, PERMS);
mem.Unmap(0x2000, 0x4000);
mem.Map(0x2000, 0, 0x4000, PERMS);
mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
mem.Unmap(0x2000, 0x4000, HEAP);
mem.Map(0x2000, 0, 0x4000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Unmap to origin", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x4000, 0, 0x4000, PERMS);
mem.Map(0x8000, 0, 0x4000, PERMS);
mem.Unmap(0x4000, 0x4000);
mem.Map(0, 0, 0x4000, PERMS);
mem.Map(0x4000, 0, 0x4000, PERMS);
mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
mem.Unmap(0x4000, 0x4000, HEAP);
mem.Map(0, 0, 0x4000, PERMS, HEAP);
mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Unmap to right", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x4000, 0, 0x4000, PERMS);
mem.Map(0x8000, 0, 0x4000, PERMS);
mem.Unmap(0x8000, 0x4000);
mem.Map(0x8000, 0, 0x4000, PERMS);
mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
mem.Unmap(0x8000, 0x4000, HEAP);
mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
}
TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x4000, 0x10000, 0x4000, PERMS);
mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
ptr[0x1000] = 17;
mem.Unmap(0x6000, 0x2000);
mem.Unmap(0x6000, 0x2000, HEAP);
REQUIRE(ptr[0x1000] == 17);
}
TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x4000, 0x10000, 0x4000, PERMS);
mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
ptr[0x3000] = 19;
ptr[0x3fff] = 12;
mem.Unmap(0x4000, 0x2000);
mem.Unmap(0x4000, 0x2000, HEAP);
REQUIRE(ptr[0x3000] == 19);
REQUIRE(ptr[0x3fff] == 12);
@@ -157,13 +158,13 @@ TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x4000, 0x10000, 0x4000, PERMS);
mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
ptr[0x0000] = 19;
ptr[0x3fff] = 12;
mem.Unmap(0x1000, 0x2000);
mem.Unmap(0x1000, 0x2000, HEAP);
REQUIRE(ptr[0x0000] == 19);
REQUIRE(ptr[0x3fff] == 12);
@@ -171,14 +172,14 @@ TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") {
HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
mem.Map(0x4000, 0x10000, 0x2000, PERMS);
mem.Map(0x6000, 0x20000, 0x2000, PERMS);
mem.Map(0x4000, 0x10000, 0x2000, PERMS, HEAP);
mem.Map(0x6000, 0x20000, 0x2000, PERMS, HEAP);
volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
ptr[0x0000] = 19;
ptr[0x3fff] = 12;
mem.Unmap(0x5000, 0x2000);
mem.Unmap(0x5000, 0x2000, HEAP);
REQUIRE(ptr[0x0000] == 19);
REQUIRE(ptr[0x3fff] == 12);
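
A hypothetical extra case in the same style (not part of the change) that exercises the new flag with a heap mapping instead of the shared HEAP = false constant used above.

TEST_CASE("HostMemory: Separate heap map", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    // Assumes a heap mapping behaves like any other mapping for plain access.
    mem.Map(0x5000, 0x8000, 0x1000, PERMS, true);

    volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
    data[0] = 50;
    REQUIRE(data[0] == 50);

    mem.Unmap(0x5000, 0x1000, true);
}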

View File

@@ -23,13 +23,13 @@ constexpr VAddr c = 16 * HIGH_PAGE_SIZE;
class RasterizerInterface {
public:
void UpdatePagesCachedCount(VAddr addr, u64 size, bool cache) {
void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS};
const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >>
Core::Memory::YUZU_PAGEBITS};
for (u64 page = page_start; page < page_end; ++page) {
int& value = page_table[page];
value += (cache ? 1 : -1);
value += delta;
if (value < 0) {
throw std::logic_error{"negative page"};
}
@@ -546,4 +546,4 @@ TEST_CASE("MemoryTracker: Cached write downloads") {
REQUIRE(!memory_track->IsRegionGpuModified(c + PAGE, PAGE));
memory_track->MarkRegionAsCpuModified(c, WORD);
REQUIRE(rasterizer.Count() == 0);
}
}

View File

@@ -473,7 +473,7 @@ private:
VAddr addr = cpu_addr + word_index * BYTES_PER_WORD;
IteratePages(changed_bits, [&](size_t offset, size_t size) {
rasterizer->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE,
size * BYTES_PER_PAGE, add_to_rasterizer);
size * BYTES_PER_PAGE, add_to_rasterizer ? 1 : -1);
});
}

View File

@@ -586,14 +586,22 @@ void Maxwell3D::ProcessQueryCondition() {
}
void Maxwell3D::ProcessCounterReset() {
switch (regs.clear_report_value) {
case Regs::ClearReport::ZPassPixelCount:
rasterizer->ResetCounter(VideoCommon::QueryType::ZPassPixelCount64);
break;
default:
LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}", regs.clear_report_value);
break;
}
const auto query_type = [clear_report = regs.clear_report_value]() {
switch (clear_report) {
case Tegra::Engines::Maxwell3D::Regs::ClearReport::ZPassPixelCount:
return VideoCommon::QueryType::ZPassPixelCount64;
case Tegra::Engines::Maxwell3D::Regs::ClearReport::StreamingPrimitivesSucceeded:
return VideoCommon::QueryType::StreamingPrimitivesSucceeded;
case Tegra::Engines::Maxwell3D::Regs::ClearReport::PrimitivesGenerated:
return VideoCommon::QueryType::PrimitivesGenerated;
case Tegra::Engines::Maxwell3D::Regs::ClearReport::VtgPrimitivesOut:
return VideoCommon::QueryType::VtgPrimitivesOut;
default:
LOG_DEBUG(HW_GPU, "Unimplemented counter reset={}", clear_report);
return VideoCommon::QueryType::Payload;
}
}();
rasterizer->ResetCounter(query_type);
}
void Maxwell3D::ProcessSyncPoint() {

View File

@@ -270,7 +270,7 @@ private:
std::jthread fence_thread;
DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
DelayedDestructionRing<TFence, 8> delayed_destruction_ring;
};
} // namespace VideoCommon

View File

@@ -28,8 +28,11 @@
namespace VideoCore {
enum class QueryType {
SamplesPassed,
PrimitivesGenerated,
TfbPrimitivesWritten,
Count,
};
constexpr std::size_t NumQueryTypes = 1;
constexpr std::size_t NumQueryTypes = static_cast<size_t>(QueryType::Count);
} // namespace VideoCore
namespace VideoCommon {
@@ -44,15 +47,6 @@ public:
explicit CounterStreamBase(QueryCache& cache_, VideoCore::QueryType type_)
: cache{cache_}, type{type_} {}
/// Updates the state of the stream, enabling or disabling as needed.
void Update(bool enabled) {
if (enabled) {
Enable();
} else {
Disable();
}
}
/// Resets the stream to zero. It doesn't disable the query after resetting.
void Reset() {
if (current) {
@@ -80,7 +74,6 @@ public:
return current != nullptr;
}
private:
/// Enables the stream.
void Enable() {
if (current) {
@@ -97,6 +90,7 @@ private:
last = std::exchange(current, nullptr);
}
private:
QueryCache& cache;
const VideoCore::QueryType type;
@@ -112,8 +106,14 @@ public:
: rasterizer{rasterizer_},
// Use reinterpret_cast instead of static_cast as workaround for
// UBSan bug (https://github.com/llvm/llvm-project/issues/59060)
cpu_memory{cpu_memory_}, streams{{CounterStream{reinterpret_cast<QueryCache&>(*this),
VideoCore::QueryType::SamplesPassed}}} {
cpu_memory{cpu_memory_}, streams{{
{CounterStream{reinterpret_cast<QueryCache&>(*this),
VideoCore::QueryType::SamplesPassed}},
{CounterStream{reinterpret_cast<QueryCache&>(*this),
VideoCore::QueryType::PrimitivesGenerated}},
{CounterStream{reinterpret_cast<QueryCache&>(*this),
VideoCore::QueryType::TfbPrimitivesWritten}},
}} {
(void)slot_async_jobs.insert(); // Null value
}
@@ -157,12 +157,11 @@ public:
AsyncFlushQuery(query, timestamp, lock);
}
/// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
void UpdateCounters() {
/// Enables all available GPU counters
void EnableCounters() {
std::unique_lock lock{mutex};
if (maxwell3d) {
const auto& regs = maxwell3d->regs;
Stream(VideoCore::QueryType::SamplesPassed).Update(regs.zpass_pixel_count_enable);
for (auto& stream : streams) {
stream.Enable();
}
}
@@ -176,7 +175,7 @@ public:
void DisableStreams() {
std::unique_lock lock{mutex};
for (auto& stream : streams) {
stream.Update(false);
stream.Disable();
}
}
@@ -353,7 +352,7 @@ private:
std::shared_ptr<std::vector<AsyncJobId>> uncommitted_flushes{};
std::list<std::shared_ptr<std::vector<AsyncJobId>>> committed_flushes;
};
}; // namespace VideoCommon
template <class QueryCache, class HostCounter>
class HostCounterBase {

View File

@@ -3,7 +3,6 @@
#include <atomic>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/div_ceil.h"
@@ -12,65 +11,61 @@
namespace VideoCore {
static constexpr u16 IdentityValue = 1;
using namespace Core::Memory;
RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) : map{}, cpu_memory{cpu_memory_} {
// We are tracking CPU memory, which cannot map more than 39 bits.
const VAddr start_address = 0;
const VAddr end_address = (1ULL << 39);
const IntervalType address_space_interval(start_address, end_address);
const auto value = std::make_pair(address_space_interval, IdentityValue);
map.add(value);
}
RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_)
: cached_pages(std::make_unique<CachedPages>()), cpu_memory{cpu_memory_} {}
RasterizerAccelerated::~RasterizerAccelerated() = default;
void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, bool cache) {
std::scoped_lock lk{map_lock};
void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
u64 uncache_begin = 0;
u64 cache_begin = 0;
u64 uncache_bytes = 0;
u64 cache_bytes = 0;
// Align sizes.
addr = Common::AlignDown(addr, YUZU_PAGESIZE);
size = Common::AlignUp(size, YUZU_PAGESIZE);
std::atomic_thread_fence(std::memory_order_acquire);
const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page);
// Declare the overall interval we are going to operate on.
const VAddr start_address = addr;
const VAddr end_address = addr + size;
const IntervalType modification_range(start_address, end_address);
if (delta > 0) {
ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!");
} else if (delta < 0) {
ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
} else {
ASSERT_MSG(false, "Delta must be non-zero!");
}
// Find the boundaries of where to iterate.
const auto lower = map.lower_bound(modification_range);
const auto upper = map.upper_bound(modification_range);
// Adds or subtracts 1, as count is an unsigned 16-bit value
count.fetch_add(static_cast<u16>(delta), std::memory_order_release);
// Iterate over the contained intervals.
for (auto it = lower; it != upper; it++) {
// Intersect interval range with modification range.
const auto current_range = modification_range & it->first;
// Calculate the address and size to operate over.
const auto current_addr = current_range.lower();
const auto current_size = current_range.upper() - current_addr;
// Get the current value of the range.
const auto value = it->second;
if (cache && value == IdentityValue) {
// If we are going to cache, and the value is not yet referenced, then cache this range.
cpu_memory.RasterizerMarkRegionCached(current_addr, current_size, true);
} else if (!cache && value == IdentityValue + 1) {
// If we are going to uncache, and this is the last reference, then uncache this range.
cpu_memory.RasterizerMarkRegionCached(current_addr, current_size, false);
// Assume delta is either -1 or 1
if (count.load(std::memory_order::relaxed) == 0) {
if (uncache_bytes == 0) {
uncache_begin = page;
}
uncache_bytes += YUZU_PAGESIZE;
} else if (uncache_bytes > 0) {
cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
false);
uncache_bytes = 0;
}
if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
if (cache_bytes == 0) {
cache_begin = page;
}
cache_bytes += YUZU_PAGESIZE;
} else if (cache_bytes > 0) {
cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
cache_bytes = 0;
}
}
// Update the set.
const auto value = std::make_pair(modification_range, IdentityValue);
if (cache) {
map.add(value);
} else {
map.subtract(value);
if (uncache_bytes > 0) {
cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
}
if (cache_bytes > 0) {
cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
}
}
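
A distilled, self-contained sketch of the new scheme (one page, no run batching): an atomic per-page reference count where only the 0 -> 1 and 1 -> 0 transitions toggle the cached state, so overlapping users of the same page never re-mark it.

#include <atomic>
#include <cstdint>
#include <cstdio>

struct PageCounter {
    std::atomic_uint16_t count{0};

    void Add(int delta, auto&& mark_cached) {
        const std::uint16_t previous =
            count.fetch_add(static_cast<std::uint16_t>(delta), std::memory_order_release);
        const std::uint16_t now = static_cast<std::uint16_t>(previous + delta);
        if (delta > 0 && now == 1) {
            mark_cached(true);   // first reference: start caching this page
        } else if (delta < 0 && now == 0) {
            mark_cached(false);  // last reference dropped: stop caching this page
        }
    }
};

int main() {
    PageCounter page;
    const auto mark = [](bool cached) { std::printf("cached=%d\n", cached); };
    page.Add(+1, mark);  // prints cached=1
    page.Add(+1, mark);  // no transition, nothing printed
    page.Add(-1, mark);  // no transition, nothing printed
    page.Add(-1, mark);  // prints cached=0
}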

View File

@@ -3,8 +3,8 @@
#pragma once
#include <mutex>
#include <boost/icl/interval_map.hpp>
#include <array>
#include <atomic>
#include "common/common_types.h"
#include "video_core/rasterizer_interface.h"
@@ -21,17 +21,28 @@ public:
explicit RasterizerAccelerated(Core::Memory::Memory& cpu_memory_);
~RasterizerAccelerated() override;
void UpdatePagesCachedCount(VAddr addr, u64 size, bool cache) override;
void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override;
private:
using PageIndex = VAddr;
using PageReferenceCount = u16;
class CacheEntry final {
public:
CacheEntry() = default;
using IntervalMap = boost::icl::interval_map<PageIndex, PageReferenceCount>;
using IntervalType = IntervalMap::interval_type;
std::atomic_uint16_t& Count(std::size_t page) {
return values[page & 3];
}
IntervalMap map;
std::mutex map_lock;
const std::atomic_uint16_t& Count(std::size_t page) const {
return values[page & 3];
}
private:
std::array<std::atomic_uint16_t, 4> values{};
};
static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!");
using CachedPages = std::array<CacheEntry, 0x2000000>;
std::unique_ptr<CachedPages> cached_pages;
Core::Memory::Memory& cpu_memory;
};
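
Back-of-the-envelope sizing implied by the constants above, assuming the usual 4 KiB YUZU_PAGESIZE: 2^39 bytes of tracked address space is 2^27 pages, packed four counters per 8-byte CacheEntry, i.e. 0x2000000 entries and 256 MiB for the whole table, which is presumably why it sits behind std::unique_ptr rather than being a by-value member.

// Sanity checks of that arithmetic (plain constants, not project code).
static_assert((1ull << 39) / 4096 == 0x8000000);   // pages in the tracked range
static_assert(0x8000000 / 4 == 0x2000000);         // CacheEntry count (4 pages each)
static_assert(0x2000000ull * 8 == 256ull << 20);   // 8-byte entries -> 256 MiB total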

View File

@@ -162,7 +162,7 @@ public:
}
/// Increase/decrease the number of objects in pages touching the specified region
virtual void UpdatePagesCachedCount(VAddr addr, u64 size, bool cache) {}
virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {}
/// Initialize disk cached resources for the game being emulated
virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,

View File

@@ -58,6 +58,9 @@ Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rast
glObjectLabel(GL_BUFFER, buffer.handle, static_cast<GLsizei>(name.size()), name.data());
}
glNamedBufferData(buffer.handle, SizeBytes(), nullptr, GL_DYNAMIC_DRAW);
if (runtime.has_unified_vertex_buffers) {
glGetNamedBufferParameterui64vNV(buffer.handle, GL_BUFFER_GPU_ADDRESS_NV, &address);
}
}
void Buffer::ImmediateUpload(size_t offset, std::span<const u8> data) noexcept {
@@ -109,6 +112,7 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_,
: device{device_}, staging_buffer_pool{staging_buffer_pool_},
has_fast_buffer_sub_data{device.HasFastBufferSubData()},
use_assembly_shaders{device.UseAssemblyShaders()},
has_unified_vertex_buffers{device.HasVertexBufferUnifiedMemory()},
stream_buffer{has_fast_buffer_sub_data ? std::nullopt : std::make_optional<StreamBuffer>()} {
GLint gl_max_attributes;
glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &gl_max_attributes);
@@ -210,8 +214,14 @@ void BufferCacheRuntime::ClearBuffer(Buffer& dest_buffer, u32 offset, size_t siz
}
void BufferCacheRuntime::BindIndexBuffer(Buffer& buffer, u32 offset, u32 size) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer.Handle());
index_buffer_offset = offset;
if (has_unified_vertex_buffers) {
buffer.MakeResident(GL_READ_ONLY);
glBufferAddressRangeNV(GL_ELEMENT_ARRAY_ADDRESS_NV, 0, buffer.HostGpuAddr() + offset,
static_cast<GLsizeiptr>(Common::AlignUp(size, 4)));
} else {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer.Handle());
index_buffer_offset = offset;
}
}
void BufferCacheRuntime::BindVertexBuffer(u32 index, Buffer& buffer, u32 offset, u32 size,
@@ -219,8 +229,15 @@ void BufferCacheRuntime::BindVertexBuffer(u32 index, Buffer& buffer, u32 offset,
if (index >= max_attributes) {
return;
}
glBindVertexBuffer(index, buffer.Handle(), static_cast<GLintptr>(offset),
static_cast<GLsizei>(stride));
if (has_unified_vertex_buffers) {
buffer.MakeResident(GL_READ_ONLY);
glBindVertexBuffer(index, 0, 0, static_cast<GLsizei>(stride));
glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, index,
buffer.HostGpuAddr() + offset, static_cast<GLsizeiptr>(size));
} else {
glBindVertexBuffer(index, buffer.Handle(), static_cast<GLintptr>(offset),
static_cast<GLsizei>(stride));
}
}
void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bindings) {
@@ -233,9 +250,23 @@ void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bi
[](u64 stride) { return static_cast<GLsizei>(stride); });
const u32 count =
std::min(static_cast<u32>(bindings.buffers.size()), max_attributes - bindings.min_index);
glBindVertexBuffers(bindings.min_index, static_cast<GLsizei>(count), buffer_handles.data(),
reinterpret_cast<const GLintptr*>(bindings.offsets.data()),
buffer_strides.data());
if (has_unified_vertex_buffers) {
for (u32 index = 0; index < count; ++index) {
Buffer& buffer = *bindings.buffers[index];
buffer.MakeResident(GL_READ_ONLY);
glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, bindings.min_index + index,
buffer.HostGpuAddr() + bindings.offsets[index],
static_cast<GLsizeiptr>(bindings.sizes[index]));
}
static constexpr std::array<size_t, 32> ZEROS{};
glBindVertexBuffers(bindings.min_index, static_cast<GLsizei>(count),
reinterpret_cast<const GLuint*>(ZEROS.data()),
reinterpret_cast<const GLintptr*>(ZEROS.data()), buffer_strides.data());
} else {
glBindVertexBuffers(bindings.min_index, static_cast<GLsizei>(count), buffer_handles.data(),
reinterpret_cast<const GLintptr*>(bindings.offsets.data()),
buffer_strides.data());
}
}
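
A condensed sketch of the GL_NV_vertex_buffer_unified_memory flow the branches above implement (hypothetical buffer_handle/attrib_index/offset/size, and assuming GLAD has loaded the NV entry points): opt in once per context, make the buffer resident, query its GPU address, then feed address ranges instead of buffer handles.

// One-time setup, then per-bind address ranges. Placeholders: buffer_handle,
// attrib_index, offset, size.
glEnableClientState(GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV);
glEnableClientState(GL_ELEMENT_ARRAY_UNIFIED_NV);

GLuint64EXT address = 0;
glMakeNamedBufferResidentNV(buffer_handle, GL_READ_ONLY);
glGetNamedBufferParameterui64vNV(buffer_handle, GL_BUFFER_GPU_ADDRESS_NV, &address);

glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, attrib_index,
                       address + offset, static_cast<GLsizeiptr>(size));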
void BufferCacheRuntime::BindUniformBuffer(size_t stage, u32 binding_index, Buffer& buffer,

View File

@@ -209,6 +209,7 @@ private:
bool has_fast_buffer_sub_data = false;
bool use_assembly_shaders = false;
bool has_unified_vertex_buffers = false;
bool use_storage_buffers = false;

View File

@@ -200,6 +200,7 @@ Device::Device(Core::Frontend::EmuWindow& emu_window) {
has_broken_texture_view_formats = is_amd || (!is_linux && is_intel);
has_nv_viewport_array2 = GLAD_GL_NV_viewport_array2;
has_derivative_control = GLAD_GL_ARB_derivative_control;
has_vertex_buffer_unified_memory = GLAD_GL_NV_vertex_buffer_unified_memory;
has_debugging_tool_attached = IsDebugToolAttached(extensions);
has_depth_buffer_float = HasExtension(extensions, "GL_NV_depth_buffer_float");
has_geometry_shader_passthrough = GLAD_GL_NV_geometry_shader_passthrough;

View File

@@ -72,6 +72,10 @@ public:
return has_texture_shadow_lod;
}
bool HasVertexBufferUnifiedMemory() const {
return has_vertex_buffer_unified_memory;
}
bool HasASTC() const {
return has_astc;
}
@@ -211,6 +215,7 @@ private:
bool has_vertex_viewport_layer{};
bool has_image_load_formatted{};
bool has_texture_shadow_lod{};
bool has_vertex_buffer_unified_memory{};
bool has_astc{};
bool has_variable_aoffi{};
bool has_component_indexing_bug{};

View File

@@ -18,16 +18,27 @@ namespace OpenGL {
namespace {
constexpr std::array<GLenum, VideoCore::NumQueryTypes> QueryTargets = {GL_SAMPLES_PASSED};
constexpr GLenum GetTarget(VideoCore::QueryType type) {
return QueryTargets[static_cast<std::size_t>(type)];
switch (type) {
case VideoCore::QueryType::SamplesPassed:
return GL_SAMPLES_PASSED;
case VideoCore::QueryType::PrimitivesGenerated:
return GL_PRIMITIVES_GENERATED;
case VideoCore::QueryType::TfbPrimitivesWritten:
return GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN;
default:
break;
}
UNIMPLEMENTED_MSG("Query type {}", type);
return 0;
}
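
A minimal usage sketch for one of the newly mapped targets (plain GL with a hypothetical handle; not how the cache structures its counters): a query object is created on the mapped target, brackets a range of draws, and its 64-bit result is read back.

GLuint query = 0;
glCreateQueries(GetTarget(VideoCore::QueryType::PrimitivesGenerated), 1, &query);

glBeginQuery(GL_PRIMITIVES_GENERATED, query);
// ... draw calls whose generated primitives should be counted ...
glEndQuery(GL_PRIMITIVES_GENERATED);

GLuint64 generated = 0;
glGetQueryObjectui64v(query, GL_QUERY_RESULT, &generated);
glDeleteQueries(1, &query);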
} // Anonymous namespace
QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_)
: QueryCacheLegacy(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {}
: QueryCacheLegacy(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {
EnableCounters();
}
QueryCache::~QueryCache() = default;
@@ -103,13 +114,13 @@ u64 CachedQuery::Flush([[maybe_unused]] bool async) {
auto& stream = cache->Stream(type);
const bool slice_counter = WaitPending() && stream.IsEnabled();
if (slice_counter) {
stream.Update(false);
stream.Disable();
}
auto result = VideoCommon::CachedQueryBase<HostCounter>::Flush();
if (slice_counter) {
stream.Update(true);
stream.Enable();
}
return result;

View File

@@ -51,6 +51,22 @@ constexpr size_t NUM_SUPPORTED_VERTEX_ATTRIBUTES = 16;
void oglEnable(GLenum cap, bool state) {
(state ? glEnable : glDisable)(cap);
}
std::optional<VideoCore::QueryType> MaxwellToVideoCoreQuery(VideoCommon::QueryType type) {
switch (type) {
case VideoCommon::QueryType::PrimitivesGenerated:
case VideoCommon::QueryType::VtgPrimitivesOut:
return VideoCore::QueryType::PrimitivesGenerated;
case VideoCommon::QueryType::ZPassPixelCount64:
return VideoCore::QueryType::SamplesPassed;
case VideoCommon::QueryType::StreamingPrimitivesSucceeded:
// case VideoCommon::QueryType::StreamingByteCount:
// TODO: StreamingByteCount = StreamingPrimitivesSucceeded * num_verts * vert_stride
return VideoCore::QueryType::TfbPrimitivesWritten;
default:
return std::nullopt;
}
}
} // Anonymous namespace
RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
@@ -162,14 +178,18 @@ void RasterizerOpenGL::Clear(u32 layer_count) {
SyncFramebufferSRGB();
}
if (regs.clear_surface.Z) {
ASSERT_MSG(regs.zeta_enable != 0, "Tried to clear Z but buffer is not enabled!");
if (regs.zeta_enable != 0) {
LOG_DEBUG(Render_OpenGL, "Tried to clear Z but buffer is not enabled!");
}
use_depth = true;
state_tracker.NotifyDepthMask();
glDepthMask(GL_TRUE);
}
if (regs.clear_surface.S) {
ASSERT_MSG(regs.zeta_enable, "Tried to clear stencil but buffer is not enabled!");
if (regs.zeta_enable) {
LOG_DEBUG(Render_OpenGL, "Tried to clear stencil but buffer is not enabled!");
}
use_stencil = true;
}
@@ -212,7 +232,6 @@ void RasterizerOpenGL::PrepareDraw(bool is_indexed, Func&& draw_func) {
SCOPE_EXIT({ gpu.TickWork(); });
gpu_memory->FlushCaching();
query_cache.UpdateCounters();
GraphicsPipeline* const pipeline{shader_cache.CurrentGraphicsPipeline()};
if (!pipeline) {
@@ -330,7 +349,6 @@ void RasterizerOpenGL::DrawTexture() {
MICROPROFILE_SCOPE(OpenGL_Drawing);
SCOPE_EXIT({ gpu.TickWork(); });
query_cache.UpdateCounters();
texture_cache.SynchronizeGraphicsDescriptors();
texture_cache.UpdateRenderTargets(false);
@@ -397,21 +415,28 @@ void RasterizerOpenGL::DispatchCompute() {
}
void RasterizerOpenGL::ResetCounter(VideoCommon::QueryType type) {
if (type == VideoCommon::QueryType::ZPassPixelCount64) {
query_cache.ResetCounter(VideoCore::QueryType::SamplesPassed);
const auto query_cache_type = MaxwellToVideoCoreQuery(type);
if (!query_cache_type.has_value()) {
UNIMPLEMENTED_IF_MSG(type != VideoCommon::QueryType::Payload, "Reset query type: {}", type);
return;
}
query_cache.ResetCounter(*query_cache_type);
}
void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) {
if (type == VideoCommon::QueryType::ZPassPixelCount64) {
if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) {
query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed, {gpu.GetTicks()});
} else {
query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed, std::nullopt);
}
return;
const auto query_cache_type = MaxwellToVideoCoreQuery(type);
if (!query_cache_type.has_value()) {
return QueryFallback(gpu_addr, type, flags, payload, subreport);
}
const bool has_timeout = True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout);
const auto timestamp = has_timeout ? std::optional<u64>{gpu.GetTicks()} : std::nullopt;
query_cache.Query(gpu_addr, *query_cache_type, timestamp);
}
void RasterizerOpenGL::QueryFallback(GPUVAddr gpu_addr, VideoCommon::QueryType type,
VideoCommon::QueryPropertiesFlags flags, u32 payload,
u32 subreport) {
if (type != VideoCommon::QueryType::Payload) {
payload = 1u;
}
@@ -1294,15 +1319,13 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum
program->ConfigureTransformFeedback();
UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation) ||
regs.IsShaderConfigEnabled(Maxwell::ShaderType::Geometry));
UNIMPLEMENTED_IF(primitive_mode != GL_POINTS);
regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation));
// We may have to call BeginTransformFeedbackNV here since they seem to call different
// implementations on Nvidia's driver (the pointer is different) but we are using
// ARB_transform_feedback3 features with NV_transform_feedback interactions and the ARB
// extension doesn't define BeginTransformFeedback (without NV) interactions. It just works.
glBeginTransformFeedback(GL_POINTS);
glBeginTransformFeedback(primitive_mode);
}
void RasterizerOpenGL::EndTransformFeedback() {

View File

@@ -225,6 +225,9 @@ private:
/// End a transform feedback
void EndTransformFeedback();
void QueryFallback(GPUVAddr gpu_addr, VideoCommon::QueryType type,
VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport);
Tegra::GPU& gpu;
const Device& device;

View File

@@ -51,7 +51,7 @@ using VideoCommon::LoadPipelines;
using VideoCommon::SerializePipeline;
using Context = ShaderContext::Context;
constexpr u32 CACHE_VERSION = 9;
constexpr u32 CACHE_VERSION = 10;
template <typename Container>
auto MakeSpan(Container& container) {

View File

@@ -168,6 +168,14 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
if (!GLAD_GL_ARB_seamless_cubemap_per_texture && !GLAD_GL_AMD_seamless_cubemap_per_texture) {
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
}
// Enable unified vertex attributes and query vertex buffer address when the driver supports it
if (device.HasVertexBufferUnifiedMemory()) {
glEnableClientState(GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV);
glEnableClientState(GL_ELEMENT_ARRAY_UNIFIED_NV);
glMakeNamedBufferResidentNV(vertex_buffer.handle, GL_READ_ONLY);
glGetNamedBufferParameterui64vNV(vertex_buffer.handle, GL_BUFFER_GPU_ADDRESS_NV,
&vertex_buffer_address);
}
}
RendererOpenGL::~RendererOpenGL() = default;
@@ -667,7 +675,13 @@ void RendererOpenGL::DrawScreen(const Layout::FramebufferLayout& layout) {
offsetof(ScreenRectVertex, tex_coord));
glVertexAttribBinding(PositionLocation, 0);
glVertexAttribBinding(TexCoordLocation, 0);
glBindVertexBuffer(0, vertex_buffer.handle, 0, sizeof(ScreenRectVertex));
if (device.HasVertexBufferUnifiedMemory()) {
glBindVertexBuffer(0, 0, 0, sizeof(ScreenRectVertex));
glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, 0, vertex_buffer_address,
sizeof(vertices));
} else {
glBindVertexBuffer(0, vertex_buffer.handle, 0, sizeof(ScreenRectVertex));
}
if (Settings::values.scaling_filter.GetValue() != Settings::ScalingFilter::NearestNeighbor) {
glBindSampler(0, present_sampler.handle);

Some files were not shown because too many files have changed in this diff.