Compare commits

58 Commits

Author SHA1 Message Date
Morph
5568763a57 vk_compute_pass: Use VK_ACCESS_NONE
This enumeration was introduced in Vulkan 1.3; prefer using it instead of defaulting the enum.

Also resolves a narrowing conversion warning on MSVC.
2022-06-14 09:14:13 -04:00
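A minimal sketch of the difference at a call site (hypothetical barrier, not the actual vk_compute_pass code):

VkMemoryBarrier barrier{};
barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
barrier.srcAccessMask = VK_ACCESS_NONE;            // was: {} (value-initialized enum)
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; // the "no access" intent is now explicit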
Mai
a3b12e3809 Merge pull request #8439 from liamwhite/monkey-compiler
general: fix compilation on GCC 12
2022-06-14 08:34:16 -04:00
Mai
dc47d0f624 Merge pull request #8459 from Morph1984/wextra-gcc
vk_compute_pass: Silence Wextra warning
2022-06-14 08:22:38 -04:00
Morph
fcfe192e83 vk_compute_pass: Silence Wextra warning
Silences a warning about using enumerated and non-enumerated types in a conditional expression.
2022-06-14 05:29:57 -04:00
Liam
bd38aefc57 kernel: fix passthrough of local captures in lambda 2022-06-13 20:09:32 -04:00
Liam
feaf010fa2 common/assert: rework ASSERT handling to avoid std::function usage 2022-06-13 20:09:32 -04:00
Liam
ebecdd3a74 general: fix compilation on MinGW GCC 12 2022-06-13 20:09:32 -04:00
Liam
a29ddcee40 common/assert: add unlikely 2022-06-13 20:09:32 -04:00
Liam
d11547024c general: fix compilation on GCC 12 2022-06-13 20:09:30 -04:00
Liam
6f59e2676b kernel: ensure class token lambda exit is unreachable 2022-06-13 20:09:00 -04:00
Liam
8fea7e56e5 kernel: fix inconsistency in AutoObjectTraits macro definitions 2022-06-13 20:09:00 -04:00
Liam
58fea44eb5 common: Don't test ASSERT conditions inline 2022-06-13 20:09:00 -04:00
Liam
084d7d6b01 common: Change semantics of UNREACHABLE to unconditionally crash 2022-06-13 20:09:00 -04:00
liamwhite
bd3bfe411d Merge pull request #8458 from lat9nq/no-constexpr-flow-block
structured_control_flow: Remove constexpr Flow::Block
2022-06-13 20:06:38 -04:00
lat9nq
963ed37fd6 structured_control_flow: Remove constexpr Flow::Block
This seems to be unsupported in newer libstdc++ versions due to
Flow::Block's base class being a non-literal type. It's not clear to me
why this was permitted in earlier versions.
2022-06-13 19:18:20 -04:00
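A reduced illustration of the failure, with hypothetical types standing in for Flow::Block and its base:

#include <string>

struct Base { std::string name; };   // non-literal: std::string has a non-trivial destructor
struct Block : Base { int id = 0; }; // Block inherits the non-literal base
// constexpr Block b{};              // rejected by GCC 12 / newer libstdc++: not a literal type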
bunnei
741da9c8bf Merge pull request #8388 from liamwhite/simpler-pause
CpuManager: simplify pausing
2022-06-13 15:48:03 -07:00
Morph
a0407a8e64 Merge pull request #8446 from liamwhite/cmd-gdb
core/debugger: support operation in yuzu-cmd
2022-06-13 14:38:37 -04:00
Morph
7582717c9d Merge pull request #8454 from liamwhite/inaddr-any
core/debugger: allow remote connections
2022-06-13 14:38:20 -04:00
bunnei
ec85eac3c9 Merge pull request #8443 from liamwhite/code-mem
kernel: fix KCodeMemory initialization
2022-06-13 11:32:27 -07:00
Liam
fb4b507ba4 core/debugger: allow remote connections 2022-06-12 11:50:50 -04:00
liamwhite
7ea78699a1 Merge pull request #8450 from lioncash/undef
gdbstub_arch: Add missing virtual destructor
2022-06-11 19:59:18 -04:00
Lioncash
80ad90651e gdbstub_arch: Add missing virtual destructor
The class is used polymorphically, so it's undefined behavior to delete
instances of GDBStubA64 and GDBStubA32 through the base class pointer.
2022-06-11 18:23:22 -04:00
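The rule in miniature (hypothetical names, not the gdbstub_arch classes themselves):

#include <memory>

struct Stub { virtual ~Stub() = default; }; // without this, the delete below is undefined behavior
struct StubA64 : Stub { /* per-arch state */ };

std::unique_ptr<Stub> s = std::make_unique<StubA64>(); // deleter runs `delete` through a Stub*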
Mai M
b94739cfa7 Merge pull request #8353 from Docteh/msvc_report_runtime
log the MSVC runtime version when running on MSVC build
2022-06-11 13:21:23 -04:00
Mai M
89e00c442d Merge pull request #8427 from Docteh/deprecate_qdesktop
deprecate usage of QDesktopWidget for going fullscreen
2022-06-11 13:20:36 -04:00
Mai M
d796341d33 Merge pull request #8449 from Docteh/translate_placeholder
retranslate the game list placeholder
2022-06-11 13:19:18 -04:00
bunnei
5282efac1b Merge pull request #8413 from behunin/bounded-queue
gpu_thread: Move to bounded queue
2022-06-11 00:07:18 -07:00
bunnei
ae83d5c6d3 Merge pull request #8393 from lat9nq/default-vulkan
general: Set renderer_backend's default to Vulkan
2022-06-11 00:06:59 -07:00
Kyle Kienapfel
3370546a7a log the MSVC runtime version when running on MSVC build
This might be useful information; not 100% sure.

[   0.958068] Frontend <Info> yuzu\main.cpp:GMainWindow:275: yuzu Version: yuzu Development Build | master-0b9ef3c0b-dirty
[   0.958095] Frontend <Info> yuzu\main.cpp:LogRuntimes:220: MSVC Compiler: 1931 Runtime: 14.32.31326.0
2022-06-10 20:37:47 -07:00
Kyle Kienapfel
2ff606628c UI: retranslate the game list placeholder
This is the "Double-click to add a new folder to the game list" message
that shows up when users first launch yuzu and is most likely never seen
again. Previously this message was not re-translated.
2022-06-10 20:15:52 -07:00
Mai M
20576ebb43 Merge pull request #8405 from Docteh/dock_undock
ui: Status bars dock button becomes DOCKED/HANDHELD button
2022-06-10 23:11:29 -04:00
Mai M
6f81160160 Merge pull request #8333 from Docteh/translate_hotkeys
UI: Translate hotkey labels in configuration
2022-06-10 23:10:28 -04:00
Mai M
266e086706 Merge pull request #8318 from Docteh/cmake-qt56-entry
Update some files with Qt 5.15.2 best practices in mind
2022-06-10 23:09:49 -04:00
Mai M
9561a2f5b1 Merge pull request #8448 from german77/gesturetypo
service: hid: Fix gesture regression
2022-06-10 15:09:22 -04:00
Narr the Reg
bc8699a9fa service: hid: Fix gesture regression 2022-06-10 13:14:31 -05:00
Liam
c3cc65a11e yuzu-cmd: ignore bogus timeouts from SDL 2022-06-10 12:49:18 -04:00
Liam
1f0fee33ed core/debugger: fix a number of shutdown deadlocks 2022-06-10 09:17:12 -04:00
Liam
de6c0defb3 core/debugger: support operation in yuzu-cmd 2022-06-10 09:11:02 -04:00
Liam
6c659c3a16 kernel: fix KCodeMemory initialization 2022-06-09 12:33:28 -04:00
Liam
af022294dd CpuManager: simplify pausing 2022-06-08 21:47:29 -04:00
bunnei
073714a762 Merge pull request #8428 from bunnei/nvflinger-fix-timing
Follow-up fixes for NVFlinger rewrite (Part 3)
2022-06-08 11:20:05 -07:00
bunnei
4ae75bec50 Merge pull request #8436 from liamwhite/asio-usage
core/debugger: fix asio write usage
2022-06-07 14:16:47 -07:00
Mai M
31527ccd25 Merge pull request #8435 from liamwhite/lambda-capture
core/debugger: fix crash due to incorrect lambda capture
2022-06-06 23:58:34 -04:00
Liam
268878f895 core/debugger: fix asio write usage 2022-06-06 23:50:56 -04:00
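For context, write_some may perform a partial write, whereas boost::asio::write loops until the whole buffer has been sent (sketch using the names from the diff below):

client_socket.write_some(boost::asio::buffer(data));          // may write only part of `data`
boost::asio::write(client_socket, boost::asio::buffer(data)); // returns only once all of `data` is written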
Kyle Kienapfel
941b663352 deprecate usage of QDesktopWidget for going fullscreen
The idea works as follows: while going fullscreen, we compare the current window geometry
with the available screens and ask for an intersection rectangle, then go fullscreen on the
screen where most of the window is located.

GuessCurrentScreen could also potentially be used to see which screen
the window is on for dynamic DPI handling
2022-06-05 20:18:27 -07:00
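A sketch of the intersection idea (simplified; the actual GuessCurrentScreen may differ):

#include <QGuiApplication>
#include <QScreen>

QScreen* GuessCurrentScreen(const QRect& window_geometry) {
    QScreen* best = nullptr;
    int best_area = 0;
    for (QScreen* screen : QGuiApplication::screens()) {
        const QRect overlap = screen->geometry().intersected(window_geometry);
        const int area = overlap.width() * overlap.height();
        if (area > best_area) { // keep the screen containing most of the window
            best_area = area;
            best = screen;
        }
    }
    return best; // go fullscreen here
}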
bunnei
888e814130 hle: service: nvflinger: buffer_queue_consumer: Always free released buffers. 2022-06-05 16:06:06 -07:00
lat9nq
4544407af6 configure_graphics: Remove unused include 2022-06-04 04:18:21 -04:00
Levi Behunin
4dd6bcd206 gpu_thread: Move to bounded queue 2022-06-02 19:37:46 -06:00
Kyle Kienapfel
054732210e ui: Status bars dock button becomes dock/undock button
For people not used to the Yuzu UI it's not always clear whether the emulated
console is docked or not. The other items update their text when clicked;
this PR brings the DOCK button in line with them.

DOCK -> DOCKED or HANDHELD
2022-06-01 17:22:53 -07:00
lat9nq
422525e3fb main: Insert warning text on broken Vulkan
Co-authored-by: Schplee <24275329+Schplee@users.noreply.github.com>
2022-05-30 10:58:19 -04:00
lat9nq
2dafb27055 main: Save config on broken Vulkan detect
Prevents possible issues if someone were to open yuzu repeatedly.
2022-05-30 10:58:19 -04:00
lat9nq
500b01076e yuzu-qt: Make has_broken_vulkan only for crashes
Being able to catch and handle a Vulkan exception is not what this is
for.
2022-05-30 10:58:18 -04:00
lat9nq
b43ae9d5ed vulkan_library: Add debug logging 2022-05-30 10:57:59 -04:00
lat9nq
f22867efc5 yuzu-qt: Attempt to workaround broken Vulkan installations
This does a few things in order to make Vulkan workable as the default
setting.

- When yuzu boots, it just opens the Vulkan library.
  - If it works, all good and we continue with Vulkan as the default.
  - If something breaks, a new file in the config directory will be left
    behind (this is deleted normally).
- If Vulkan is not working, has_broken_vulkan is set to true.
  - The first time this happens, a warning is displayed to notify the
    user.
  - This forces use of OpenGL, and Vulkan cannot be selected.
  - The Shader Backend selector is made accessible for use in custom
    configurations.
  - To disable has_broken_vulkan, the user needs to press a button in
    Graphics Configuration to manually run the Vulkan device
    enumeration.
2022-05-30 10:57:59 -04:00
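A hedged sketch of the marker-file probe described above (the marker name and the loader call are illustrative, not yuzu's actual ones):

#include <filesystem>
#include <fstream>
#include <dlfcn.h>

bool VulkanProbeSucceeds(const std::filesystem::path& config_dir) {
    const auto marker = config_dir / "vulkan_probe"; // hypothetical marker file
    if (std::filesystem::exists(marker)) {
        return false; // a previous probe crashed before cleanup: has_broken_vulkan
    }
    std::ofstream{marker};                          // leave the marker, then touch the driver
    void* lib = dlopen("libvulkan.so.1", RTLD_NOW); // if this crashes, the marker survives
    if (lib != nullptr) {
        dlclose(lib);
    }
    std::filesystem::remove(marker); // reached only when nothing crashed
    return lib != nullptr;
}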
lat9nq
67fa743414 default_ini: Reflect new renderer backend default setting 2022-05-29 21:38:36 -04:00
lat9nq
5799fa4d7d settings: Set Vulkan to the default renderer backend 2022-05-29 21:38:36 -04:00
Kyle K
499c89790b motion touch ui: move remaining connection out of .ui file
Two reasons for this:
1. Out of 7 connections, 6 are in ConfigureMotionTouch::ConnectEvents;
   this one is the outlier.
2. Qt6 doesn't moc the connection properly
2022-05-29 18:37:38 -07:00
Kyle K
75bf2c20eb Update some files with Qt 5.15.2 best practices in mind
There was some discussion about updating to Qt6 and I figured I would
work on some smaller parts. On the Windows platform, the WinMain function has moved
from Qt5::WinMain to a new target called Qt6::EntryPointPrivate.

Also, Qt 5.15 supports versionless CMake targets:
https://www.qt.io/blog/versionless-cmake-targets-qt-5.15

The other changes in this commit are to support Qt6, but in ways that don't mess with Qt5.

src/yuzu/bootmanager.cpp: Qt6 complains about not being able to tell whether to use QPoint or QPointF; picking QPoint
src/yuzu/bootmanager.h: Qt6 prefers that QStringList be included rather than forward-declared as an empty class
src/yuzu/configuration/configure_system.cpp: toULongLong returns an unsigned 64-bit integer, but
   Settings::values.rng_seed is only 32 bits wide
src/yuzu/game_list.cpp: Qt6 returns a different datatype for QStringList.length than Qt5;
   it used to be int, but in Qt6 it's now qsizetype
src/yuzu/loading_screen.cpp: Qt5's docs for QStyleOption.init say to switch to initFrom;
   QStyleOption.init doesn't exist in Qt6
src/yuzu/main.cpp: another QPointer and QStringList.size; let's standardize on size()
2022-05-29 09:21:52 -07:00
Kyle K
669a9a644d UI: Translate hotkey labels in configuration
Another request from GillianMC.

The translated strings have been placed in a separate "Hotkeys" context as an alternative
to adding the tr function to the Config class or adding them to the ConfigureHotkeys
context, which is quite long. The English strings are attached to the items in the Action
column as "data" and are used for RetranslateUI and for saving the hotkey configuration.
2022-05-18 22:05:19 -07:00
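A minimal sketch of the per-context lookup (real Qt API; the helper and strings are illustrative):

#include <QCoreApplication>
#include <QStandardItem>

QStandardItem* MakeHotkeyItem(const char* english_label) {
    // Translated text comes from the separate "Hotkeys" context...
    auto* item = new QStandardItem(QCoreApplication::translate("Hotkeys", english_label));
    // ...while the English source string rides along as item data,
    // for RetranslateUI and for saving the hotkey configuration.
    item->setData(QString::fromUtf8(english_label));
    return item;
}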
129 changed files with 1027 additions and 509 deletions

View File

@@ -58,6 +58,19 @@ QPushButton#GPUStatusBarButton:!checked {
color: #109010;
}
QPushButton#DockingStatusBarButton {
min-width: 0px;
color: #000000;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#DockingStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#buttonRefreshDevices {
min-width: 21px;
min-height: 21px;

View File

@@ -1304,6 +1304,19 @@ QPushButton#GPUStatusBarButton:!checked {
color: #40dd40;
}
QPushButton#DockingStatusBarButton {
min-width: 0px;
color: #ffffff;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#DockingStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#buttonRefreshDevices {
min-width: 23px;
min-height: 23px;

View File

@@ -2207,6 +2207,19 @@ QPushButton#GPUStatusBarButton:!checked {
color: #40dd40;
}
QPushButton#DockingStatusBarButton {
min-width: 0px;
color: #ffffff;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#DockingStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#buttonRefreshDevices {
min-width: 19px;
min-height: 19px;

View File

@@ -40,6 +40,11 @@ target_include_directories(mbedtls PUBLIC ./mbedtls/include)
add_library(microprofile INTERFACE)
target_include_directories(microprofile INTERFACE ./microprofile)
# GCC bugs
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "12" AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND MINGW)
target_compile_options(microprofile INTERFACE "-Wno-array-bounds")
endif()
# libusb
if (NOT LIBUSB_FOUND OR YUZU_USE_BUNDLED_LIBUSB)
add_subdirectory(libusb)

View File

@@ -429,7 +429,7 @@ void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, Vo
in_params.node_id);
break;
default:
UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
}
}
}
@@ -1312,7 +1312,7 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, std::s
samples_to_read - samples_read, channel, temp_mix_offset);
break;
default:
UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
}
temp_mix_offset += samples_decoded;

View File

@@ -50,7 +50,7 @@ EffectBase* EffectContext::RetargetEffect(std::size_t i, EffectType effect) {
effects[i] = std::make_unique<EffectBiquadFilter>();
break;
default:
UNREACHABLE_MSG("Unimplemented effect {}", effect);
ASSERT_MSG(false, "Unimplemented effect {}", effect);
effects[i] = std::make_unique<EffectStubbed>();
}
return GetInfo(i);
@@ -104,7 +104,7 @@ void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
auto& params = GetParams();
const auto* reverb_params = reinterpret_cast<I3dl2ReverbParams*>(in_params.raw.data());
if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
UNREACHABLE_MSG("Invalid reverb max channel count {}", reverb_params->max_channels);
ASSERT_MSG(false, "Invalid reverb max channel count {}", reverb_params->max_channels);
return;
}

View File

@@ -483,7 +483,7 @@ bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
// Add more work
index_stack.push(j);
} else if (node_state == NodeStates::State::InFound) {
UNREACHABLE_MSG("Node start marked as found");
ASSERT_MSG(false, "Node start marked as found");
ResetState();
return false;
}

View File

@@ -114,7 +114,7 @@ void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
in_params.current_playstate = ServerPlayState::Play;
break;
default:
UNREACHABLE_MSG("Unknown playstate {}", voice_in.play_state);
ASSERT_MSG(false, "Unknown playstate {}", voice_in.play_state);
break;
}
@@ -410,7 +410,7 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
return in_params.should_depop;
}
default:
UNREACHABLE_MSG("Invalid playstate {}", in_params.current_playstate);
ASSERT_MSG(false, "Invalid playstate {}", in_params.current_playstate);
}
return false;

View File

@@ -6,8 +6,13 @@
#include "common/settings.h"
void assert_handle_failure() {
void assert_fail_impl() {
if (Settings::values.use_debug_asserts) {
Crash();
}
}
[[noreturn]] void unreachable_impl() {
Crash();
throw std::runtime_error("Unreachable code");
}

View File

@@ -9,44 +9,43 @@
// Sometimes we want to try to continue even after hitting an assert.
// However touching this file yields a global recompilation as this header is included almost
// everywhere. So let's just move the handling of the failed assert to a single cpp file.
void assert_handle_failure();
// For asserts we'd like to keep all the junk executed when an assert happens away from the
// important code in the function. One way of doing this is to put all the relevant code inside a
// lambda and force the compiler to not inline it. Unfortunately, MSVC seems to have no syntax to
// specify __declspec on lambda functions, so what we do instead is define a noinline wrapper
// template that calls the lambda. This seems to generate an extra instruction at the call-site
// compared to the ideal implementation (which wouldn't support ASSERT_MSG parameters), but is good
// enough for our purposes.
template <typename Fn>
#if defined(_MSC_VER)
[[msvc::noinline]]
#elif defined(__GNUC__)
[[gnu::cold, gnu::noinline]]
void assert_fail_impl();
[[noreturn]] void unreachable_impl();
#ifdef _MSC_VER
#define YUZU_NO_INLINE __declspec(noinline)
#else
#define YUZU_NO_INLINE __attribute__((noinline))
#endif
static void
assert_noinline_call(const Fn& fn) {
fn();
assert_handle_failure();
}
#define ASSERT(_a_) \
do \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
([&]() YUZU_NO_INLINE { \
if (!(_a_)) [[unlikely]] { \
LOG_CRITICAL(Debug, "Assertion Failed!"); \
assert_fail_impl(); \
} \
while (0)
}())
#define ASSERT_MSG(_a_, ...) \
do \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
([&]() YUZU_NO_INLINE { \
if (!(_a_)) [[unlikely]] { \
LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); \
assert_fail_impl(); \
} \
while (0)
}())
#define UNREACHABLE() \
do { \
LOG_CRITICAL(Debug, "Unreachable code!"); \
unreachable_impl(); \
} while (0)
#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
#define UNREACHABLE_MSG(...) \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
do { \
LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); \
unreachable_impl(); \
} while (0)
#ifdef _DEBUG
#define DEBUG_ASSERT(_a_) ASSERT(_a_)
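In use, the reworked macros behave as follows (sketch; buffer and kind are hypothetical names):

ASSERT_MSG(buffer != nullptr, "buffer was null"); // logs, then crashes only when
                                                  // Settings::values.use_debug_asserts is set
switch (kind) {
default:
    UNREACHABLE_MSG("unhandled kind={}", kind);   // logs, then unconditionally crashes;
}                                                 // [[noreturn]] lets callers drop dummy returns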

View File

@@ -0,0 +1,180 @@
// SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
// SPDX-License-Identifier: MIT
#pragma once
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4324)
#endif
#include <atomic>
#include <bit>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <new>
#include <stdexcept>
#include <stop_token>
#include <type_traits>
#include <utility>
namespace Common {
namespace mpsc {
#if defined(__cpp_lib_hardware_interference_size)
constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
#else
constexpr size_t hardware_interference_size = 64;
#endif
template <typename T>
using AlignedAllocator = std::allocator<T>;
template <typename T>
struct Slot {
~Slot() noexcept {
if (turn.test()) {
destroy();
}
}
template <typename... Args>
void construct(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
"T must be nothrow constructible with Args&&...");
std::construct_at(reinterpret_cast<T*>(&storage), std::forward<Args>(args)...);
}
void destroy() noexcept {
static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
std::destroy_at(reinterpret_cast<T*>(&storage));
}
T&& move() noexcept {
return reinterpret_cast<T&&>(storage);
}
// Align to avoid false sharing between adjacent slots
alignas(hardware_interference_size) std::atomic_flag turn{};
struct aligned_store {
struct type {
alignas(T) unsigned char data[sizeof(T)];
};
};
typename aligned_store::type storage;
};
template <typename T, typename Allocator = AlignedAllocator<Slot<T>>>
class Queue {
public:
explicit Queue(const size_t capacity, const Allocator& allocator = Allocator())
: allocator_(allocator) {
if (capacity < 1) {
throw std::invalid_argument("capacity < 1");
}
// Ensure that the queue length is an integer power of 2
// This is so that idx(i) can be a simple i & mask_ instead of i % capacity
// https://github.com/rigtorp/MPMCQueue/pull/36
if (!std::has_single_bit(capacity)) {
throw std::invalid_argument("capacity must be an integer power of 2");
}
mask_ = capacity - 1;
// Allocate one extra slot to prevent false sharing on the last slot
slots_ = allocator_.allocate(mask_ + 2);
// Allocators are not required to honor alignment for over-aligned types
// (see http://eel.is/c++draft/allocator.requirements#10) so we verify
// alignment here
if (reinterpret_cast<uintptr_t>(slots_) % alignof(Slot<T>) != 0) {
allocator_.deallocate(slots_, mask_ + 2);
throw std::bad_alloc();
}
for (size_t i = 0; i < mask_ + 1; ++i) {
std::construct_at(&slots_[i]);
}
static_assert(alignof(Slot<T>) == hardware_interference_size,
"Slot must be aligned to cache line boundary to prevent false sharing");
static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
"Slot size must be a multiple of cache line size to prevent "
"false sharing between adjacent slots");
static_assert(sizeof(Queue) % hardware_interference_size == 0,
"Queue size must be a multiple of cache line size to "
"prevent false sharing between adjacent queues");
}
~Queue() noexcept {
for (size_t i = 0; i < mask_ + 1; ++i) {
slots_[i].~Slot();
}
allocator_.deallocate(slots_, mask_ + 2);
}
// non-copyable and non-movable
Queue(const Queue&) = delete;
Queue& operator=(const Queue&) = delete;
void Push(const T& v) noexcept {
static_assert(std::is_nothrow_copy_constructible_v<T>,
"T must be nothrow copy constructible");
emplace(v);
}
template <typename P, typename = std::enable_if_t<std::is_nothrow_constructible_v<T, P&&>>>
void Push(P&& v) noexcept {
emplace(std::forward<P>(v));
}
void Pop(T& v, std::stop_token stop) noexcept {
auto const tail = tail_.fetch_add(1);
auto& slot = slots_[idx(tail)];
if (false == slot.turn.test()) {
std::unique_lock lock{cv_mutex};
cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
}
v = slot.move();
slot.destroy();
slot.turn.clear();
slot.turn.notify_one();
}
private:
template <typename... Args>
void emplace(Args&&... args) noexcept {
static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
"T must be nothrow constructible with Args&&...");
auto const head = head_.fetch_add(1);
auto& slot = slots_[idx(head)];
slot.turn.wait(true);
slot.construct(std::forward<Args>(args)...);
slot.turn.test_and_set();
cv.notify_one();
}
constexpr size_t idx(size_t i) const noexcept {
return i & mask_;
}
std::conditional_t<true, std::condition_variable_any, std::condition_variable> cv;
std::mutex cv_mutex;
size_t mask_;
Slot<T>* slots_;
[[no_unique_address]] Allocator allocator_;
// Align to avoid false sharing between head_ and tail_
alignas(hardware_interference_size) std::atomic<size_t> head_{0};
alignas(hardware_interference_size) std::atomic<size_t> tail_{0};
static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
"T must be nothrow copy or move assignable");
static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
};
} // namespace mpsc
template <typename T, typename Allocator = mpsc::AlignedAllocator<mpsc::Slot<T>>>
using MPSCQueue = mpsc::Queue<T, Allocator>;
} // namespace Common
#ifdef _MSC_VER
#pragma warning(pop)
#endif
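A minimal usage sketch for the queue above (the include path is an assumption):

#include <thread>

#include "common/bounded_threadsafe_queue.h" // assumed header name for this new file

int main() {
    Common::MPSCQueue<int> queue{64};  // capacity must be a power of two
    std::jthread consumer{[&](std::stop_token stop) {
        int value{};
        queue.Pop(value, stop);        // blocks until a producer pushes or stop is requested
    }};
    queue.Push(42);                    // wakes the consumer
}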

View File

@@ -147,7 +147,7 @@ void UpdateRescalingInfo() {
info.down_shift = 0;
break;
default:
UNREACHABLE();
ASSERT(false);
info.up_scale = 1;
info.down_shift = 0;
}

View File

@@ -496,7 +496,7 @@ struct Values {
// Renderer
RangedSetting<RendererBackend> renderer_backend{
RendererBackend::OpenGL, RendererBackend::OpenGL, RendererBackend::Vulkan, "backend"};
RendererBackend::Vulkan, RendererBackend::OpenGL, RendererBackend::Vulkan, "backend"};
BasicSetting<bool> renderer_debug{false, "debug"};
BasicSetting<bool> renderer_shader_feedback{false, "shader_feedback"};
BasicSetting<bool> enable_nsight_aftermath{false, "nsight_aftermath"};

View File

@@ -493,6 +493,12 @@ void System::Shutdown() {
impl->Shutdown();
}
void System::DetachDebugger() {
if (impl->debugger) {
impl->debugger->NotifyShutdown();
}
}
std::unique_lock<std::mutex> System::StallCPU() {
return impl->StallCPU();
}

View File

@@ -160,6 +160,9 @@ public:
/// Shutdown the emulated system.
void Shutdown();
/// Forcibly detach the debugger if it is running.
void DetachDebugger();
std::unique_lock<std::mutex> StallCPU();
void UnstallCPU();

View File

@@ -16,7 +16,8 @@
namespace Core {
CpuManager::CpuManager(System& system_) : system{system_} {}
CpuManager::CpuManager(System& system_)
: pause_barrier{std::make_unique<Common::Barrier>(1)}, system{system_} {}
CpuManager::~CpuManager() = default;
void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
@@ -30,8 +31,10 @@ void CpuManager::Initialize() {
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
}
pause_barrier = std::make_unique<Common::Barrier>(Core::Hardware::NUM_CPU_CORES + 1);
} else {
core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
pause_barrier = std::make_unique<Common::Barrier>(2);
}
}
@@ -138,51 +141,14 @@ void CpuManager::MultiCoreRunSuspendThread() {
auto core = kernel.CurrentPhysicalCoreIndex();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.CurrentPhysicalCoreIndex());
scheduler.RescheduleCurrentCore();
}
}
void CpuManager::MultiCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = true;
for (const auto& data : core_data) {
all_not_barrier &= !data.is_running.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.enter_barrier->Set();
}
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.exit_barrier->Set();
}
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
/// Don't release the barrier
}
paused_state = paused;
}
///////////////////////////////////////////////////////////////////////////////
/// SingleCore ///
///////////////////////////////////////////////////////////////////////////////
@@ -235,8 +201,9 @@ void CpuManager::SingleCoreRunSuspendThread() {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
scheduler.RescheduleCurrentCore();
}
@@ -274,37 +241,21 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
}
}
void CpuManager::SingleCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
}
core_data[0].enter_barrier->Set();
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
core_data[0].exit_barrier->Set();
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
/// Don't release the barrier
}
paused_state = paused;
}
void CpuManager::Pause(bool paused) {
if (is_multicore) {
MultiCorePause(paused);
} else {
SingleCorePause(paused);
std::scoped_lock lk{pause_lock};
if (pause_state == paused) {
return;
}
// Set the new state
pause_state.store(paused);
// Wake up any waiting threads
pause_state.notify_all();
// Wait for all threads to successfully change state before returning
pause_barrier->Sync();
}
void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
@@ -320,27 +271,29 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
Common::SetCurrentThreadName(name.c_str());
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
auto& data = core_data[core];
data.enter_barrier = std::make_unique<Common::Event>();
data.exit_barrier = std::make_unique<Common::Event>();
data.host_context = Common::Fiber::ThreadToFiber();
data.is_running = false;
data.initialized = true;
const bool sc_sync = !is_async_gpu && !is_multicore;
bool sc_sync_first_use = sc_sync;
// Cleanup
SCOPE_EXIT({
data.host_context->Exit();
data.enter_barrier.reset();
data.exit_barrier.reset();
data.initialized = false;
MicroProfileOnThreadExit();
});
/// Running
while (running_mode) {
data.is_running = false;
data.enter_barrier->Wait();
if (pause_state.load(std::memory_order_relaxed)) {
// Wait for caller to acknowledge pausing
pause_barrier->Sync();
// Wait until unpaused
pause_state.wait(true, std::memory_order_relaxed);
// Wait for caller to acknowledge unpausing
pause_barrier->Sync();
}
if (sc_sync_first_use) {
system.GPU().ObtainContext();
sc_sync_first_use = false;
@@ -352,12 +305,7 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
}
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
data.is_running = true;
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
data.is_running = false;
data.is_paused = true;
data.exit_barrier->Wait();
data.is_paused = false;
}
}
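The new handshake, reduced to its core (a sketch of the pattern in the diff above):

// Caller, in Pause(paused):
pause_state.store(paused);
pause_state.notify_all();  // wake any core thread sleeping in wait()
pause_barrier->Sync();     // returns once every core thread has acknowledged

// Core thread, at the top of its run loop:
if (pause_state.load(std::memory_order_relaxed)) {
    pause_barrier->Sync();                             // acknowledge pausing
    pause_state.wait(true, std::memory_order_relaxed); // sleep until unpaused
    pause_barrier->Sync();                             // acknowledge unpausing
}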

View File

@@ -69,13 +69,11 @@ private:
void MultiCoreRunGuestLoop();
void MultiCoreRunIdleThread();
void MultiCoreRunSuspendThread();
void MultiCorePause(bool paused);
void SingleCoreRunGuestThread();
void SingleCoreRunGuestLoop();
void SingleCoreRunIdleThread();
void SingleCoreRunSuspendThread();
void SingleCorePause(bool paused);
static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
@@ -83,16 +81,13 @@ private:
struct CoreData {
std::shared_ptr<Common::Fiber> host_context;
std::unique_ptr<Common::Event> enter_barrier;
std::unique_ptr<Common::Event> exit_barrier;
std::atomic<bool> is_running;
std::atomic<bool> is_paused;
std::atomic<bool> initialized;
std::jthread host_thread;
};
std::atomic<bool> running_mode{};
std::atomic<bool> paused_state{};
std::atomic<bool> pause_state{};
std::unique_ptr<Common::Barrier> pause_barrier{};
std::mutex pause_lock{};
std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};

View File

@@ -140,7 +140,6 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
return 0x3C;
}
UNREACHABLE();
return 0;
}
u64 GetSignatureTypePaddingSize(SignatureType type) {
@@ -155,7 +154,6 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
return 0x40;
}
UNREACHABLE();
return 0;
}
SignatureType Ticket::GetSignatureType() const {

View File

@@ -42,6 +42,16 @@ static std::span<const u8> ReceiveInto(Readable& r, Buffer& buffer) {
return received_data;
}
enum class SignalType {
Stopped,
ShuttingDown,
};
struct SignalInfo {
SignalType type;
Kernel::KThread* thread;
};
namespace Core {
class DebuggerImpl : public DebuggerBackend {
@@ -56,7 +66,7 @@ public:
ShutdownServer();
}
bool NotifyThreadStopped(Kernel::KThread* thread) {
bool SignalDebugger(SignalInfo signal_info) {
std::scoped_lock lk{connection_lock};
if (stopped) {
@@ -64,9 +74,13 @@ public:
// It should be ignored.
return false;
}
stopped = true;
signal_pipe.write_some(boost::asio::buffer(&thread, sizeof(thread)));
// Set up the state.
stopped = true;
info = signal_info;
// Write a single byte into the pipe to wake up the debug interface.
boost::asio::write(signal_pipe, boost::asio::buffer(&stopped, sizeof(stopped)));
return true;
}
@@ -75,7 +89,7 @@ public:
}
void WriteToClient(std::span<const u8> data) override {
client_socket.write_some(boost::asio::buffer(data.data(), data.size_bytes()));
boost::asio::write(client_socket, boost::asio::buffer(data.data(), data.size_bytes()));
}
void SetActiveThread(Kernel::KThread* thread) override {
@@ -96,7 +110,7 @@ private:
connection_thread = std::jthread([&, port](std::stop_token stop_token) {
try {
// Initialize the listening socket and accept a new client.
tcp::endpoint endpoint{boost::asio::ip::address_v4::loopback(), port};
tcp::endpoint endpoint{boost::asio::ip::address_v4::any(), port};
tcp::acceptor acceptor{io_context, endpoint};
acceptor.async_accept(client_socket, [](const auto&) {});
@@ -124,7 +138,7 @@ private:
Common::SetCurrentThreadName("yuzu:Debugger");
// Set up the client signals for new data.
AsyncReceiveInto(signal_pipe, active_thread, [&](auto d) { PipeData(d); });
AsyncReceiveInto(signal_pipe, pipe_data, [&](auto d) { PipeData(d); });
AsyncReceiveInto(client_socket, client_data, [&](auto d) { ClientData(d); });
// Stop the emulated CPU.
@@ -142,9 +156,28 @@ private:
}
void PipeData(std::span<const u8> data) {
AllCoreStop();
UpdateActiveThread();
frontend->Stopped(active_thread);
switch (info.type) {
case SignalType::Stopped:
// Stop emulation.
AllCoreStop();
// Notify the client.
active_thread = info.thread;
UpdateActiveThread();
frontend->Stopped(active_thread);
break;
case SignalType::ShuttingDown:
frontend->ShuttingDown();
// Wait for emulation to shut down gracefully now.
suspend.reset();
signal_pipe.close();
client_socket.shutdown(boost::asio::socket_base::shutdown_both);
LOG_INFO(Debug_GDBStub, "Shut down server");
break;
}
}
void ClientData(std::span<const u8> data) {
@@ -246,7 +279,9 @@ private:
boost::asio::ip::tcp::socket client_socket;
std::optional<std::unique_lock<std::mutex>> suspend;
SignalInfo info;
Kernel::KThread* active_thread;
bool pipe_data;
bool stopped;
std::array<u8, 4096> client_data;
@@ -263,7 +298,13 @@ Debugger::Debugger(Core::System& system, u16 port) {
Debugger::~Debugger() = default;
bool Debugger::NotifyThreadStopped(Kernel::KThread* thread) {
return impl && impl->NotifyThreadStopped(thread);
return impl && impl->SignalDebugger(SignalInfo{SignalType::Stopped, thread});
}
void Debugger::NotifyShutdown() {
if (impl) {
impl->SignalDebugger(SignalInfo{SignalType::ShuttingDown, nullptr});
}
}
} // namespace Core

View File

@@ -35,6 +35,11 @@ public:
*/
bool NotifyThreadStopped(Kernel::KThread* thread);
/**
* Notify the debugger that a shutdown is being performed now and disconnect.
*/
void NotifyShutdown();
private:
std::unique_ptr<DebuggerImpl> impl;
};

View File

@@ -66,6 +66,11 @@ public:
*/
virtual void Stopped(Kernel::KThread* thread) = 0;
/**
* Called when emulation is shutting down.
*/
virtual void ShuttingDown() = 0;
/**
* Called when new data is asynchronously received on the client socket.
* A list of actions to perform is returned.

View File

@@ -106,6 +106,8 @@ GDBStub::~GDBStub() = default;
void GDBStub::Connected() {}
void GDBStub::ShuttingDown() {}
void GDBStub::Stopped(Kernel::KThread* thread) {
SendReply(arch->ThreadStatus(thread, GDB_STUB_SIGTRAP));
}

View File

@@ -23,6 +23,7 @@ public:
void Connected() override;
void Stopped(Kernel::KThread* thread) override;
void ShuttingDown() override;
std::vector<DebuggerAction> ClientData(std::span<const u8> data) override;
private:

View File

@@ -15,6 +15,7 @@ namespace Core {
class GDBStubArch {
public:
virtual ~GDBStubArch() = default;
virtual std::string GetTargetXML() const = 0;
virtual std::string RegRead(const Kernel::KThread* thread, size_t id) const = 0;
virtual void RegWrite(Kernel::KThread* thread, size_t id, std::string_view value) const = 0;

View File

@@ -419,7 +419,7 @@ std::optional<Core::Crypto::Key128> NCA::GetKeyAreaKey(NCASectionCryptoType type
Core::Crypto::Mode::ECB);
cipher.Transcode(key_area.data(), key_area.size(), key_area.data(), Core::Crypto::Op::Decrypt);
Core::Crypto::Key128 out;
Core::Crypto::Key128 out{};
if (type == NCASectionCryptoType::XTS) {
std::copy(key_area.begin(), key_area.begin() + 0x10, out.begin());
} else if (type == NCASectionCryptoType::CTR || type == NCASectionCryptoType::BKTR) {

View File

@@ -50,7 +50,7 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
low = mid + 1;
}
}
UNREACHABLE_MSG("Offset could not be found in BKTR block.");
ASSERT_MSG(false, "Offset could not be found in BKTR block.");
return {0, 0};
}
} // Anonymous namespace

View File

@@ -108,7 +108,7 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
// TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
return ContentRecordType::HtmlDocument;
default:
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type);
ASSERT_MSG(false, "Invalid NCAContentType={:02X}", type);
return ContentRecordType{};
}
}

View File

@@ -144,7 +144,7 @@ VirtualFile RealVfsFilesystem::MoveFile(std::string_view old_path_, std::string_
LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", new_path);
}
} else {
UNREACHABLE();
ASSERT(false);
return nullptr;
}

View File

@@ -65,7 +65,7 @@ void DefaultControllerApplet::ReconfigureControllers(std::function<void()> callb
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::Handheld);
controller->Connect(true);
} else {
UNREACHABLE_MSG("Unable to add a new controller based on the given parameters!");
ASSERT_MSG(false, "Unable to add a new controller based on the given parameters!");
}
}

View File

@@ -48,7 +48,7 @@ EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type) {
return handheld.get();
case NpadIdType::Invalid:
default:
UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
return nullptr;
}
}
@@ -77,7 +77,7 @@ const EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type
return handheld.get();
case NpadIdType::Invalid:
default:
UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
return nullptr;
}
}

View File

@@ -141,7 +141,7 @@ public:
if (index < DomainHandlerCount()) {
domain_handlers[index] = nullptr;
} else {
UNREACHABLE_MSG("Unexpected handler index {}", index);
ASSERT_MSG(false, "Unexpected handler index {}", index);
}
}

View File

@@ -244,7 +244,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
// If we somehow get an invalid type, abort.
default:
UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
ASSERT_MSG(false, "Unknown slab type: {}", slab_types[i]);
}
// If we've hit the end of a gap, free it.

View File

@@ -35,7 +35,7 @@ public:
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
}
UNREACHABLE();
ASSERT(false);
return ResultUnknown;
}
@@ -49,7 +49,7 @@ public:
case Svc::ArbitrationType::WaitIfEqual:
return WaitIfEqual(addr, value, timeout);
}
UNREACHABLE();
ASSERT(false);
return ResultUnknown;
}

View File

@@ -84,7 +84,7 @@ u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
}
UNREACHABLE();
ASSERT(false);
return 0;
}
@@ -101,7 +101,7 @@ std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
ASSERT(IsAllowed39BitType(type));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
}
UNREACHABLE();
ASSERT(false);
return 0;
}

View File

@@ -18,7 +18,7 @@ namespace Kernel {
class KernelCore;
class KProcess;
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
\
private: \
friend class ::Kernel::KClassTokenGenerator; \
@@ -40,16 +40,19 @@ public:
static constexpr const char* GetStaticTypeName() { \
return TypeName; \
} \
virtual TypeObj GetTypeObj() const { \
virtual TypeObj GetTypeObj() ATTRIBUTE { \
return GetStaticTypeObj(); \
} \
virtual const char* GetTypeName() const { \
virtual const char* GetTypeName() ATTRIBUTE { \
return GetStaticTypeName(); \
} \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
class KAutoObject {
protected:
class TypeObj {
@@ -82,7 +85,7 @@ protected:
};
private:
KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {

View File

@@ -49,6 +49,7 @@ private:
}
}
}
UNREACHABLE();
}();
template <typename T>

View File

@@ -27,23 +27,18 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
auto& page_table = m_owner->PageTable();
// Construct the page group.
m_page_group =
KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
m_page_group = {};
// Lock the memory.
R_TRY(page_table.LockForCodeMemory(addr, size))
R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
// Clear the memory.
//
// FIXME: this ends up clobbering address ranges outside the scope of the mapping within
// guest memory, and is not specifically required if the guest program is correctly
// written, so disable until this is further investigated.
//
// for (const auto& block : m_page_group.Nodes()) {
// std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
// }
for (const auto& block : m_page_group.Nodes()) {
std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
}
// Set remaining tracking members.
m_owner->Open();
m_address = addr;
m_is_initialized = true;
m_is_owner_mapped = false;
@@ -57,8 +52,14 @@ void KCodeMemory::Finalize() {
// Unlock.
if (!m_is_mapped && !m_is_owner_mapped) {
const size_t size = m_page_group.GetNumPages() * PageSize;
m_owner->PageTable().UnlockForCodeMemory(m_address, size);
m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
}
// Close the page group.
m_page_group = {};
// Close our reference to our owner.
m_owner->Close();
}
ResultCode KCodeMemory::Map(VAddr address, size_t size) {
@@ -118,7 +119,8 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
k_perm = KMemoryPermission::UserReadExecute;
break;
default:
break;
// Already validated by ControlCodeMemory svc
UNREACHABLE();
}
// Map the memory.

View File

@@ -29,7 +29,7 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
} else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
return KMemoryManager::Pool::SystemNonSecure;
} else {
UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
return {};
}
}

View File

@@ -35,7 +35,7 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
case FileSys::ProgramAddressSpaceType::Is39Bit:
return 39;
default:
UNREACHABLE();
ASSERT(false);
return {};
}
}
@@ -128,7 +128,7 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
const std::size_t needed_size{
(alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
if (alloc_size < needed_size) {
UNREACHABLE();
ASSERT(false);
return ResultOutOfMemory;
}
@@ -542,6 +542,95 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
return ResultSuccess;
}
bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
const auto& pg = pg_ll.Nodes();
const auto& memory_layout = system.Kernel().MemoryLayout();
// Empty groups are necessarily invalid.
if (pg.empty()) {
return false;
}
// We're going to validate that the group we'd expect is the group we see.
auto cur_it = pg.begin();
PAddr cur_block_address = cur_it->GetAddress();
size_t cur_block_pages = cur_it->GetNumPages();
auto UpdateCurrentIterator = [&]() {
if (cur_block_pages == 0) {
if ((++cur_it) == pg.end()) {
return false;
}
cur_block_address = cur_it->GetAddress();
cur_block_pages = cur_it->GetNumPages();
}
return true;
};
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
return false;
}
// Prepare tracking variables.
PAddr cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
size_t tot_size = cur_size;
// Iterate, comparing expected to actual.
while (tot_size < size) {
if (!page_table_impl.ContinueTraversal(next_entry, context)) {
return false;
}
if (next_entry.phys_addr != (cur_addr + cur_size)) {
const size_t cur_pages = cur_size / PageSize;
if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
return false;
}
if (!UpdateCurrentIterator()) {
return false;
}
if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
return false;
}
cur_block_address += cur_size;
cur_block_pages -= cur_pages;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
// Ensure we compare the right amount for the last block.
if (tot_size > size) {
cur_size -= (tot_size - size);
}
if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
return false;
}
if (!UpdateCurrentIterator()) {
return false;
}
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
KScopedLightLock lk(general_lock);
@@ -1341,7 +1430,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
new_state = KMemoryState::AliasCodeData;
break;
default:
UNREACHABLE();
ASSERT(false);
}
}
@@ -1687,22 +1776,22 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
return ResultSuccess;
}
ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size) {
return this->LockMemoryAndOpen(
nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
KMemoryAttribute::All, KMemoryAttribute::None,
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
KMemoryAttribute::None,
static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
KMemoryPermission::KernelReadWrite),
KMemoryAttribute::Locked);
}
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
KMemoryAttribute::Locked, nullptr);
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size,
const KPageLinkedList& pg) {
return this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
}
ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
@@ -1734,9 +1823,7 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
VAddr addr{start};
while (addr < start + (num_pages * PageSize)) {
const PAddr paddr{GetPhysicalAddr(addr)};
if (!paddr) {
UNREACHABLE();
}
ASSERT(paddr != 0);
page_linked_list.AddBlock(paddr, 1);
addr += PageSize;
}
@@ -1767,7 +1854,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
break;
default:
UNREACHABLE();
ASSERT(false);
}
addr += size;
@@ -1798,7 +1885,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss
case OperationType::ChangePermissionsAndRefresh:
break;
default:
UNREACHABLE();
ASSERT(false);
}
return ResultSuccess;
}
@@ -1835,7 +1922,6 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
return code_region_start;
default:
UNREACHABLE();
return {};
}
}
@@ -1871,7 +1957,6 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
return code_region_end - code_region_start;
default:
UNREACHABLE();
return {};
}
}
@@ -2125,7 +2210,7 @@ ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_
// Check the page group.
if (pg != nullptr) {
UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
}
// Decide on new perm and attr.

View File

@@ -72,8 +72,8 @@ public:
KMemoryPermission perm, PAddr map_addr = 0);
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageLinkedList& pg);
ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
@@ -178,6 +178,7 @@ private:
const KPageLinkedList* pg);
ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
bool IsValidPageGroup(const KPageLinkedList& pg, VAddr addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();

View File

@@ -60,7 +60,7 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
session_ptr->ClientConnected(server.AcceptSession());
} else {
UNREACHABLE();
ASSERT(false);
}
return ResultSuccess;

View File

@@ -350,7 +350,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
break;
default:
UNREACHABLE();
ASSERT(false);
}
// Create TLS region

View File

@@ -97,13 +97,13 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
"object_id {} is too big! This probably means a recent service call "
"to {} needed to return a new interface!",
object_id, name);
UNREACHABLE();
ASSERT(false);
return ResultSuccess; // Ignore error if asserts are off
}
if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
return strong_ptr->HandleSyncRequest(*this, context);
} else {
UNREACHABLE();
ASSERT(false);
return ResultSuccess;
}

View File

@@ -133,7 +133,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
UNIMPLEMENTED();
break;
default:
UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
break;
}
thread_type = type;

View File

@@ -212,7 +212,9 @@ struct KernelCore::Impl {
system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes();
const auto sizes{memory_layout->GetTotalAndKernelMemorySizes()};
const auto total_size{sizes.first};
const auto kernel_size{sizes.second};
// If setting the default system values fails, then something seriously wrong has occurred.
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
@@ -252,6 +254,7 @@ struct KernelCore::Impl {
core_id)
.IsSuccess());
suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
suspend_threads[core_id]->DisableDispatch();
}
}
@@ -1073,9 +1076,6 @@ void KernelCore::Suspend(bool in_suspention) {
impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended);
if (!should_suspend) {
impl->suspend_threads[core_id]->DisableDispatch();
}
}
}
}

View File

@@ -1876,7 +1876,7 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
KScheduler::YieldToAnyThread(kernel);
} else {
// Nintendo does nothing at all if an otherwise invalid value is passed.
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
}

View File

@@ -178,7 +178,7 @@ ResultCode Controller::GetStatus() const {
}
void Controller::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void Controller::Execute() {

View File

@@ -156,7 +156,7 @@ ResultCode Error::GetStatus() const {
}
void Error::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data!");
ASSERT_MSG(false, "Unexpected interactive applet data!");
}
void Error::Execute() {

View File

@@ -76,7 +76,7 @@ ResultCode Auth::GetStatus() const {
}
void Auth::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data.");
ASSERT_MSG(false, "Unexpected interactive applet data.");
}
void Auth::Execute() {
@@ -175,7 +175,7 @@ ResultCode PhotoViewer::GetStatus() const {
}
void PhotoViewer::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data.");
ASSERT_MSG(false, "Unexpected interactive applet data.");
}
void PhotoViewer::Execute() {

View File

@@ -67,7 +67,7 @@ ResultCode MiiEdit::GetStatus() const {
}
void MiiEdit::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void MiiEdit::Execute() {

View File

@@ -44,7 +44,7 @@ ResultCode ProfileSelect::GetStatus() const {
}
void ProfileSelect::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void ProfileSelect::Execute() {

View File

@@ -71,7 +71,7 @@ void SoftwareKeyboard::Initialize() {
InitializeBackground(applet_mode);
break;
default:
UNREACHABLE_MSG("Invalid LibraryAppletMode={}", applet_mode);
ASSERT_MSG(false, "Invalid LibraryAppletMode={}", applet_mode);
break;
}
}

View File

@@ -279,7 +279,7 @@ void WebBrowser::Initialize() {
InitializeLobby();
break;
default:
UNREACHABLE_MSG("Invalid ShimKind={}", web_arg_header.shim_kind);
ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
break;
}
}
@@ -320,7 +320,7 @@ void WebBrowser::Execute() {
ExecuteLobby();
break;
default:
UNREACHABLE_MSG("Invalid ShimKind={}", web_arg_header.shim_kind);
ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
WebBrowserExit(WebExitReason::EndButtonPressed);
break;
}

View File

@@ -899,7 +899,7 @@ void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
case FileSys::SaveDataSpaceId::TemporaryStorage:
case FileSys::SaveDataSpaceId::ProperSystem:
case FileSys::SaveDataSpaceId::SafeMode:
UNREACHABLE();
ASSERT(false);
}
auto filesystem = std::make_shared<IFileSystem>(system, std::move(dir.Unwrap()),

View File

@@ -61,6 +61,7 @@ void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
}
last_update_timestamp = shared_memory->gesture_lifo.timestamp;
UpdateGestureSharedMemory(gesture, time_difference);
}
void Controller_Gesture::ReadTouchInput() {
@@ -94,8 +95,7 @@ bool Controller_Gesture::ShouldUpdateGesture(const GestureProperties& gesture,
return false;
}
void Controller_Gesture::UpdateGestureSharedMemory(u8* data, std::size_t size,
GestureProperties& gesture,
void Controller_Gesture::UpdateGestureSharedMemory(GestureProperties& gesture,
f32 time_difference) {
GestureType type = GestureType::Idle;
GestureAttribute attributes{};

View File

@@ -107,8 +107,7 @@ private:
bool ShouldUpdateGesture(const GestureProperties& gesture, f32 time_difference);
// Updates the shared memory to the next state
void UpdateGestureSharedMemory(u8* data, std::size_t size, GestureProperties& gesture,
f32 time_difference);
void UpdateGestureSharedMemory(GestureProperties& gesture, f32 time_difference);
// Initializes new gesture
void NewGesture(GestureProperties& gesture, GestureType& type, GestureAttribute& attributes);

View File

@@ -160,7 +160,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
shared_memory->system_properties.raw = 0;
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
shared_memory->style_tag.fullkey.Assign(1);
@@ -422,7 +422,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
libnx_state.connection_status.is_connected.Assign(1);
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
case Core::HID::NpadStyleIndex::NES:
@@ -597,7 +597,7 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
case Core::HID::NpadStyleIndex::Pokeball:
@@ -856,7 +856,7 @@ void Controller_NPad::VibrateController(
}
if (vibration_device_handle.device_index == Core::HID::DeviceIndex::None) {
UNREACHABLE_MSG("DeviceIndex should never be None!");
ASSERT_MSG(false, "DeviceIndex should never be None!");
return;
}

View File

@@ -1441,7 +1441,7 @@ void Hid::GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
break;
case Core::HID::DeviceIndex::None:
default:
UNREACHABLE_MSG("DeviceIndex should never be None!");
ASSERT_MSG(false, "DeviceIndex should never be None!");
vibration_device_info.position = Core::HID::VibrationDevicePosition::None;
break;
}

View File

@@ -347,7 +347,7 @@ public:
}
if (!succeeded) {
UNREACHABLE_MSG("Out of address space!");
ASSERT_MSG(false, "Out of address space!");
return Kernel::ResultOutOfMemory;
}

View File

@@ -290,7 +290,7 @@ MiiStoreData BuildRandomStoreData(Age age, Gender gender, Race race, const Commo
u8 glasses_type{};
while (glasses_type_start < glasses_type_info.values[glasses_type]) {
if (++glasses_type >= glasses_type_info.values_count) {
UNREACHABLE();
ASSERT(false);
break;
}
}

View File

@@ -23,7 +23,7 @@ u32 SyncpointManager::AllocateSyncpoint() {
return syncpoint_id;
}
}
UNREACHABLE_MSG("No more available syncpoints!");
ASSERT_MSG(false, "No more available syncpoints!");
return {};
}

View File

@@ -89,14 +89,6 @@ Status BufferQueueConsumer::AcquireBuffer(BufferItem* out_buffer,
LOG_DEBUG(Service_NVFlinger, "acquiring slot={}", slot);
// If the front buffer is still being tracked, update its slot state
if (core->StillTracking(*front)) {
slots[slot].acquire_called = true;
slots[slot].needs_cleanup_on_release = false;
slots[slot].buffer_state = BufferState::Acquired;
slots[slot].fence = Fence::NoFence();
}
// If the buffer has previously been acquired by the consumer, set graphic_buffer to nullptr to
// avoid unnecessarily remapping this buffer on the consumer side.
if (out_buffer->acquire_called) {
@@ -139,26 +131,11 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
++current;
}
if (slots[slot].buffer_state == BufferState::Acquired) {
slots[slot].fence = release_fence;
slots[slot].buffer_state = BufferState::Free;
slots[slot].buffer_state = BufferState::Free;
listener = core->connected_producer_listener;
listener = core->connected_producer_listener;
LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot);
} else if (slots[slot].needs_cleanup_on_release) {
LOG_DEBUG(Service_NVFlinger, "releasing a stale buffer slot {} (state = {})", slot,
slots[slot].buffer_state);
slots[slot].needs_cleanup_on_release = false;
return Status::StaleBufferSlot;
} else {
LOG_ERROR(Service_NVFlinger, "attempted to release buffer slot {} but its state was {}",
slot, slots[slot].buffer_state);
return Status::BadValue;
}
LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot);
core->SignalDequeueCondition();
}

View File

@@ -84,10 +84,6 @@ void BufferQueueCore::FreeBufferLocked(s32 slot) {
slots[slot].graphic_buffer.reset();
if (slots[slot].buffer_state == BufferState::Acquired) {
slots[slot].needs_cleanup_on_release = true;
}
slots[slot].buffer_state = BufferState::Free;
slots[slot].frame_number = UINT32_MAX;
slots[slot].acquire_called = false;

View File

@@ -659,7 +659,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
value = core->consumer_usage_bit;
break;
default:
UNREACHABLE();
ASSERT(false);
return Status::BadValue;
}

View File

@@ -31,7 +31,6 @@ struct BufferSlot final {
u64 frame_number{};
Fence fence;
bool acquire_called{};
bool needs_cleanup_on_release{};
bool attached_by_consumer{};
bool is_preallocated{};
};
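
Read together, the four NVFlinger hunks in this series remove every reader and writer of needs_cleanup_on_release: AcquireBuffer drops its slot-state bookkeeping for still-tracked buffers, ReleaseBuffer loses the stale-slot branch, FreeBufferLocked stops flagging acquired slots, and the BufferSlot member itself is deleted here. Released buffers now move directly to BufferState::Free.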

View File

@@ -48,12 +48,12 @@ ResultCode StandardUserSystemClockCore::GetClockContext(Core::System& system,
}
ResultCode StandardUserSystemClockCore::Flush(const SystemClockContext&) {
UNREACHABLE();
UNIMPLEMENTED();
return ERROR_NOT_IMPLEMENTED;
}
ResultCode StandardUserSystemClockCore::SetClockContext(const SystemClockContext&) {
UNREACHABLE();
UNIMPLEMENTED();
return ERROR_NOT_IMPLEMENTED;
}

View File

@@ -111,7 +111,7 @@ struct TimeManager::Impl final {
FileSys::VirtualFile& vfs_file) {
if (time_zone_content_manager.GetTimeZoneManager().SetDeviceLocationNameWithTimeZoneRule(
location_name, vfs_file) != ResultSuccess) {
UNREACHABLE();
ASSERT(false);
return;
}
@@ -155,7 +155,7 @@ struct TimeManager::Impl final {
} else {
if (standard_local_system_clock_core.SetCurrentTime(system_, posix_time) !=
ResultSuccess) {
UNREACHABLE();
ASSERT(false);
return;
}
}
@@ -170,7 +170,7 @@ struct TimeManager::Impl final {
if (standard_network_system_clock_core.SetSystemClockContext(clock_context) !=
ResultSuccess) {
UNREACHABLE();
ASSERT(false);
return;
}
@@ -183,7 +183,7 @@ struct TimeManager::Impl final {
Clock::SteadyClockTimePoint steady_clock_time_point) {
if (standard_user_system_clock_core.SetAutomaticCorrectionEnabled(
system_, is_automatic_correction_enabled) != ResultSuccess) {
UNREACHABLE();
ASSERT(false);
return;
}
@@ -203,7 +203,7 @@ struct TimeManager::Impl final {
if (GetStandardLocalSystemClockCore()
.SetCurrentTime(system_, timespan.ToSeconds())
.IsError()) {
UNREACHABLE();
ASSERT(false);
return;
}
}

View File

@@ -279,7 +279,7 @@ static constexpr int TransitionTime(int year, Rule rule, int offset) {
break;
}
default:
UNREACHABLE();
ASSERT(false);
}
return value + rule.transition_time + offset;
}

View File

@@ -128,11 +128,10 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
// Apply patches if necessary
if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) {
std::vector<u8> pi_header;
pi_header.insert(pi_header.begin(), reinterpret_cast<u8*>(&nso_header),
reinterpret_cast<u8*>(&nso_header) + sizeof(NSOHeader));
pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.data(),
program_image.data() + program_image.size());
std::vector<u8> pi_header(sizeof(NSOHeader) + program_image.size());
std::memcpy(pi_header.data(), &nso_header, sizeof(NSOHeader));
std::memcpy(pi_header.data() + sizeof(NSOHeader), program_image.data(),
program_image.size());
pi_header = pm->PatchNSO(pi_header, nso_file.GetName());
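
The replacement builds the patch input with one sized allocation and two memcpys instead of two vector::insert calls, each of which may reallocate and shift previously inserted bytes. A self-contained sketch of the pattern, with hypothetical names:

#include <cstdint>
#include <cstring>
#include <vector>

struct Header {
    std::uint32_t magic;
    std::uint32_t version;
};

// Concatenate a trivially copyable header and a payload in one allocation.
std::vector<std::uint8_t> BuildPatchInput(const Header& header,
                                          const std::vector<std::uint8_t>& image) {
    std::vector<std::uint8_t> out(sizeof(Header) + image.size());
    std::memcpy(out.data(), &header, sizeof(Header));
    if (!image.empty()) {
        std::memcpy(out.data() + sizeof(Header), image.data(), image.size());
    }
    return out;
}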

View File

@@ -25,7 +25,6 @@ u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
return memory.Read64(addr);
default:
UNREACHABLE();
return 0;
}
}
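
This hunk reads as the other half of the assert rework: UNREACHABLE() stays, and the now-dead return 0; after it is deleted (the EvaluateBranchCondition hunk below has the same shape). Dropping the trailing return only compiles warning-free if the crash path is visibly noreturn, as in this sketch with illustrative names:

#include <cstdlib>

[[noreturn]] inline void Crash() {
    std::abort();
}

#define UNREACHABLE() Crash()

unsigned BytesForWidth(unsigned width) {
    switch (width) {
    case 8:
        return 1;
    case 16:
        return 2;
    default:
        UNREACHABLE(); // [[noreturn]], so no fallback return is needed
    }
}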

View File

@@ -58,7 +58,7 @@ public:
[[nodiscard]] Stack Remove(Token token) const;
private:
boost::container::small_vector<StackEntry, 3> entries;
std::vector<StackEntry> entries;
};
struct IndirectBranch {

View File

@@ -975,13 +975,7 @@ private:
Environment& env;
IR::AbstractSyntaxList& syntax_list;
bool uses_demote_to_helper{};
// TODO: C++20 Remove this when all compilers support constexpr std::vector
#if __cpp_lib_constexpr_vector >= 201907
static constexpr Flow::Block dummy_flow_block;
#else
const Flow::Block dummy_flow_block;
#endif
};
} // Anonymous namespace
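
The guarded constexpr member is dropped; the feature-test macro alone was never sufficient here, since __cpp_lib_constexpr_vector only reports library support, while a constexpr variable additionally requires its type to be a literal type, which a class with a non-literal base can never be. The rule in miniature:

struct NonLiteralBase {
    ~NonLiteralBase() {} // user-provided destructor -> not a literal type
};

struct Block : NonLiteralBase {};

// constexpr Block dummy{};  // ill-formed: constexpr requires a literal type
const Block dummy{};         // fine: plain const, runtime/static initialization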

View File

@@ -224,7 +224,7 @@ void Codec::Decode() {
vp9_hidden_frame = vp9_decoder->WasFrameHidden();
return vp9_decoder->GetFrameBytes();
default:
UNREACHABLE();
ASSERT(false);
return std::vector<u8>{};
}
}();

View File

@@ -228,7 +228,7 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
break;
}
default:
UNREACHABLE();
ASSERT(false);
break;
}
gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),

View File

@@ -202,7 +202,7 @@ public:
case Size::Size_11_11_10:
return 3;
default:
UNREACHABLE();
ASSERT(false);
return 1;
}
}
@@ -238,7 +238,7 @@ public:
case Size::Size_11_11_10:
return 4;
default:
UNREACHABLE();
ASSERT(false);
return 1;
}
}
@@ -274,7 +274,7 @@ public:
case Size::Size_11_11_10:
return "11_11_10";
default:
UNREACHABLE();
ASSERT(false);
return {};
}
}
@@ -296,7 +296,7 @@ public:
case Type::Float:
return "FLOAT";
}
UNREACHABLE();
ASSERT(false);
return {};
}
@@ -336,7 +336,7 @@ public:
case 3:
return {x3, y3};
default:
UNREACHABLE();
ASSERT(false);
return {0, 0};
}
}
@@ -1193,7 +1193,7 @@ public:
case IndexFormat::UnsignedInt:
return 4;
}
UNREACHABLE();
ASSERT(false);
return 1;
}

View File

@@ -62,7 +62,7 @@ void MaxwellDMA::Launch() {
if (!is_src_pitch && !is_dst_pitch) {
// If both the source and the destination are in block layout, assert.
UNREACHABLE_MSG("Tiled->Tiled DMA transfers are not yet implemented");
UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
return;
}
@@ -260,7 +260,7 @@ void MaxwellDMA::ReleaseSemaphore() {
memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
break;
default:
UNREACHABLE_MSG("Unknown semaphore type: {}", static_cast<u32>(type.Value()));
ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
}
}

View File

@@ -31,7 +31,8 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
VideoCore::RasterizerInterface* const rasterizer = renderer.ReadRasterizer();
while (!stop_token.stop_requested()) {
CommandDataContainer next = state.queue.PopWait(stop_token);
CommandDataContainer next;
state.queue.Pop(next, stop_token);
if (stop_token.stop_requested()) {
break;
}
@@ -49,7 +50,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
} else if (const auto* invalidate = std::get_if<InvalidateRegionCommand>(&next.data)) {
rasterizer->OnCPUWrite(invalidate->addr, invalidate->size);
} else {
UNREACHABLE();
ASSERT(false);
}
state.signaled_fence.store(next.fence);
if (next.block) {

View File

@@ -10,7 +10,7 @@
#include <thread>
#include <variant>
#include "common/threadsafe_queue.h"
#include "common/bounded_threadsafe_queue.h"
#include "video_core/framebuffer_config.h"
namespace Tegra {
@@ -96,9 +96,9 @@ struct CommandDataContainer {
/// Struct used to synchronize the GPU thread
struct SynchState final {
using CommandQueue = Common::SPSCQueue<CommandDataContainer, true>;
using CommandQueue = Common::MPSCQueue<CommandDataContainer>;
std::mutex write_lock;
CommandQueue queue;
CommandQueue queue{512}; // size must be 2^n
u64 last_fence{};
std::atomic<u64> signaled_fence{};
std::condition_variable_any cv;
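
These gpu_thread hunks swap the unbounded SPSCQueue for a bounded queue (capacity 512, which per the in-line comment must be a power of two) whose blocking Pop takes a std::stop_token, so shutdown can wake a consumer parked waiting for work. A minimal sketch of that stop-aware pop, assuming C++20; the real Common::MPSCQueue differs in implementation, only the wakeup pattern is shown:

#include <condition_variable>
#include <cstddef>
#include <deque>
#include <mutex>
#include <stop_token>
#include <utility>

template <typename T>
class BoundedQueue {
public:
    explicit BoundedQueue(std::size_t capacity_) : capacity{capacity_} {}

    // Returns false if stop was requested before an item became available.
    bool Pop(T& out, std::stop_token stop) {
        std::unique_lock lock{mutex};
        if (!cv.wait(lock, stop, [this] { return !items.empty(); })) {
            return false;
        }
        out = std::move(items.front());
        items.pop_front();
        return true;
    }

    // Bounded: rejects the push instead of growing without limit.
    bool Push(T value) {
        {
            std::scoped_lock lock{mutex};
            if (items.size() == capacity) {
                return false;
            }
            items.push_back(std::move(value));
        }
        cv.notify_one();
        return true;
    }

private:
    std::size_t capacity;
    std::mutex mutex;
    std::condition_variable_any cv;
    std::deque<T> items;
};

Usage mirrors the RunThread hunk above: the consumer calls Pop(next, stop_token) and re-checks stop_requested() afterwards, since waking without a command means the thread was asked to exit.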

View File

@@ -71,7 +71,7 @@ void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
}
}
if (!mid_method.has_value()) {
UNREACHABLE_MSG("Macro 0x{0:x} was not uploaded", method);
ASSERT_MSG(false, "Macro 0x{0:x} was not uploaded", method);
return;
}
}

View File

@@ -308,7 +308,6 @@ bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond,
return value != 0;
}
UNREACHABLE();
return true;
}
Macro::Opcode MacroInterpreterImpl::GetOpcode() const {

View File

@@ -411,7 +411,7 @@ void MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
Xbyak::Label end;
auto value = Compile_GetRegister(opcode.src_a, eax);
test(value, value);
cmp(value, 0); // test(value, value);
if (optimizer.has_delayed_pc) {
switch (opcode.branch_condition) {
case Macro::BranchCondition::Zero:

View File

@@ -67,7 +67,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
ASSERT(it->first == gpu_addr);
map_ranges.erase(it);
} else {
UNREACHABLE_MSG("Unmapping non-existent GPU address=0x{:x}", gpu_addr);
ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
}
const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
@@ -206,7 +206,7 @@ T MemoryManager::Read(GPUVAddr addr) const {
return value;
}
UNREACHABLE();
ASSERT(false);
return {};
}
@@ -219,7 +219,7 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
return;
}
UNREACHABLE();
ASSERT(false);
}
template u8 MemoryManager::Read<u8>(GPUVAddr addr) const;

View File

@@ -48,7 +48,7 @@ GLenum Stage(size_t stage_index) {
case 4:
return GL_FRAGMENT_SHADER;
}
UNREACHABLE_MSG("{}", stage_index);
ASSERT_MSG(false, "{}", stage_index);
return GL_NONE;
}
@@ -65,7 +65,7 @@ GLenum AssemblyStage(size_t stage_index) {
case 4:
return GL_FRAGMENT_PROGRAM_NV;
}
UNREACHABLE_MSG("{}", stage_index);
ASSERT_MSG(false, "{}", stage_index);
return GL_NONE;
}

View File

@@ -85,7 +85,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
case Maxwell::TessellationPrimitive::Quads:
return Shader::TessPrimitive::Quads;
}
UNREACHABLE();
ASSERT(false);
return Shader::TessPrimitive::Triangles;
}();
info.tess_spacing = [&] {
@@ -97,7 +97,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
case Maxwell::TessellationSpacing::FractionalEven:
return Shader::TessSpacing::FractionalEven;
}
UNREACHABLE();
ASSERT(false);
return Shader::TessSpacing::Equal;
}();
break;

View File

@@ -83,7 +83,7 @@ GLenum ImageTarget(const VideoCommon::ImageInfo& info) {
case ImageType::Buffer:
return GL_TEXTURE_BUFFER;
}
UNREACHABLE_MSG("Invalid image type={}", info.type);
ASSERT_MSG(false, "Invalid image type={}", info.type);
return GL_NONE;
}
@@ -107,7 +107,7 @@ GLenum ImageTarget(Shader::TextureType type, int num_samples = 1) {
case Shader::TextureType::Buffer:
return GL_TEXTURE_BUFFER;
}
UNREACHABLE_MSG("Invalid image view type={}", type);
ASSERT_MSG(false, "Invalid image view type={}", type);
return GL_NONE;
}
@@ -119,7 +119,7 @@ GLenum TextureMode(PixelFormat format, bool is_first) {
case PixelFormat::S8_UINT_D24_UNORM:
return is_first ? GL_STENCIL_INDEX : GL_DEPTH_COMPONENT;
default:
UNREACHABLE();
ASSERT(false);
return GL_DEPTH_COMPONENT;
}
}
@@ -140,7 +140,7 @@ GLint Swizzle(SwizzleSource source) {
case SwizzleSource::OneFloat:
return GL_ONE;
}
UNREACHABLE_MSG("Invalid swizzle source={}", source);
ASSERT_MSG(false, "Invalid swizzle source={}", source);
return GL_NONE;
}
@@ -197,7 +197,7 @@ GLint ConvertA5B5G5R1_UNORM(SwizzleSource source) {
case SwizzleSource::OneFloat:
return GL_ONE;
}
UNREACHABLE_MSG("Invalid swizzle source={}", source);
ASSERT_MSG(false, "Invalid swizzle source={}", source);
return GL_NONE;
}
@@ -381,10 +381,10 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
glTextureStorage3D(handle, gl_num_levels, gl_internal_format, width, height, depth);
break;
case GL_TEXTURE_BUFFER:
UNREACHABLE();
ASSERT(false);
break;
default:
UNREACHABLE_MSG("Invalid target=0x{:x}", target);
ASSERT_MSG(false, "Invalid target=0x{:x}", target);
break;
}
return texture;
@@ -420,7 +420,7 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
case Shader::ImageFormat::R32G32B32A32_UINT:
return GL_RGBA32UI;
}
UNREACHABLE_MSG("Invalid image format={}", format);
ASSERT_MSG(false, "Invalid image format={}", format);
return GL_R32UI;
}
@@ -579,7 +579,7 @@ void TextureCacheRuntime::EmulateCopyImage(Image& dst, Image& src,
} else if (IsPixelFormatBGR(dst.info.format) || IsPixelFormatBGR(src.info.format)) {
format_conversion_pass.ConvertImage(dst, src, copies);
} else {
UNREACHABLE();
ASSERT(false);
}
}
@@ -620,7 +620,7 @@ void TextureCacheRuntime::AccelerateImageUpload(Image& image, const ImageBufferM
case ImageType::Linear:
return util_shaders.PitchUpload(image, map, swizzles);
default:
UNREACHABLE();
ASSERT(false);
break;
}
}
@@ -639,7 +639,7 @@ FormatProperties TextureCacheRuntime::FormatInfo(ImageType type, GLenum internal
case ImageType::e3D:
return format_properties[2].at(internal_format);
default:
UNREACHABLE();
ASSERT(false);
return FormatProperties{};
}
}
@@ -888,7 +888,7 @@ void Image::CopyBufferToImage(const VideoCommon::BufferImageCopy& copy, size_t b
}
break;
default:
UNREACHABLE();
ASSERT(false);
}
}
@@ -924,7 +924,7 @@ void Image::CopyImageToBuffer(const VideoCommon::BufferImageCopy& copy, size_t b
depth = copy.image_extent.depth;
break;
default:
UNREACHABLE();
ASSERT(false);
}
// Compressed formats don't have a pixel format or type
const bool is_compressed = gl_format == GL_NONE;
@@ -950,7 +950,7 @@ void Image::Scale(bool up_scale) {
case SurfaceType::DepthStencil:
return GL_DEPTH_STENCIL_ATTACHMENT;
default:
UNREACHABLE();
ASSERT(false);
return GL_COLOR_ATTACHMENT0;
}
}();
@@ -965,7 +965,7 @@ void Image::Scale(bool up_scale) {
case SurfaceType::DepthStencil:
return GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
default:
UNREACHABLE();
ASSERT(false);
return GL_COLOR_BUFFER_BIT;
}
}();
@@ -980,7 +980,7 @@ void Image::Scale(bool up_scale) {
case SurfaceType::DepthStencil:
return 3;
default:
UNREACHABLE();
ASSERT(false);
return 0;
}
}();
@@ -1045,7 +1045,7 @@ bool Image::ScaleUp(bool ignore) {
return false;
}
if (info.type == ImageType::Linear) {
UNREACHABLE();
ASSERT(false);
return false;
}
flags |= ImageFlagBits::Rescaled;
@@ -1139,7 +1139,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
UNIMPLEMENTED();
break;
case ImageViewType::Buffer:
UNREACHABLE();
ASSERT(false);
break;
}
switch (info.type) {
@@ -1319,7 +1319,7 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
buffer_bits |= GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
break;
default:
UNREACHABLE();
ASSERT(false);
buffer_bits |= GL_DEPTH_BUFFER_BIT;
break;
}

View File

@@ -206,7 +206,7 @@ inline GLenum IndexFormat(Maxwell::IndexFormat index_format) {
case Maxwell::IndexFormat::UnsignedInt:
return GL_UNSIGNED_INT;
}
UNREACHABLE_MSG("Invalid index_format={}", index_format);
ASSERT_MSG(false, "Invalid index_format={}", index_format);
return {};
}
@@ -243,7 +243,7 @@ inline GLenum PrimitiveTopology(Maxwell::PrimitiveTopology topology) {
case Maxwell::PrimitiveTopology::Patches:
return GL_PATCHES;
}
UNREACHABLE_MSG("Invalid topology={}", topology);
ASSERT_MSG(false, "Invalid topology={}", topology);
return GL_POINTS;
}
@@ -271,8 +271,8 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode,
}
break;
}
UNREACHABLE_MSG("Invalid texture filter mode={} and mipmap filter mode={}", filter_mode,
mipmap_filter_mode);
ASSERT_MSG(false, "Invalid texture filter mode={} and mipmap filter mode={}", filter_mode,
mipmap_filter_mode);
return GL_NEAREST;
}
@@ -550,7 +550,7 @@ inline GLenum PolygonMode(Maxwell::PolygonMode polygon_mode) {
case Maxwell::PolygonMode::Fill:
return GL_FILL;
}
UNREACHABLE_MSG("Invalid polygon mode={}", polygon_mode);
ASSERT_MSG(false, "Invalid polygon mode={}", polygon_mode);
return GL_FILL;
}
@@ -563,7 +563,7 @@ inline GLenum ReductionFilter(Tegra::Texture::SamplerReduction filter) {
case Tegra::Texture::SamplerReduction::Max:
return GL_MAX;
}
UNREACHABLE_MSG("Invalid reduction filter={}", static_cast<int>(filter));
ASSERT_MSG(false, "Invalid reduction filter={}", static_cast<int>(filter));
return GL_WEIGHTED_AVERAGE_ARB;
}

View File

@@ -79,7 +79,7 @@ const char* GetSource(GLenum source) {
case GL_DEBUG_SOURCE_OTHER:
return "OTHER";
default:
UNREACHABLE();
ASSERT(false);
return "Unknown source";
}
}
@@ -101,7 +101,7 @@ const char* GetType(GLenum type) {
case GL_DEBUG_TYPE_MARKER:
return "MARKER";
default:
UNREACHABLE();
ASSERT(false);
return "Unknown type";
}
}

View File

@@ -282,7 +282,7 @@ GLenum StoreFormat(u32 bytes_per_block) {
case 16:
return GL_RGBA32UI;
}
UNREACHABLE();
ASSERT(false);
return GL_R8UI;
}

View File

@@ -25,7 +25,7 @@ VkFilter Filter(Tegra::Texture::TextureFilter filter) {
case Tegra::Texture::TextureFilter::Linear:
return VK_FILTER_LINEAR;
}
UNREACHABLE_MSG("Invalid sampler filter={}", filter);
ASSERT_MSG(false, "Invalid sampler filter={}", filter);
return {};
}
@@ -42,7 +42,7 @@ VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter
case Tegra::Texture::TextureMipmapFilter::Linear:
return VK_SAMPLER_MIPMAP_MODE_LINEAR;
}
UNREACHABLE_MSG("Invalid sampler mipmap mode={}", mipmap_filter);
ASSERT_MSG(false, "Invalid sampler mipmap mode={}", mipmap_filter);
return {};
}
@@ -70,7 +70,7 @@ VkSamplerAddressMode WrapMode(const Device& device, Tegra::Texture::WrapMode wra
case Tegra::Texture::TextureFilter::Linear:
return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
}
UNREACHABLE();
ASSERT(false);
return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
case Tegra::Texture::WrapMode::MirrorOnceClampToEdge:
return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
@@ -744,7 +744,7 @@ VkViewportCoordinateSwizzleNV ViewportSwizzle(Maxwell::ViewportSwizzle swizzle)
case Maxwell::ViewportSwizzle::NegativeW:
return VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV;
}
UNREACHABLE_MSG("Invalid swizzle={}", swizzle);
ASSERT_MSG(false, "Invalid swizzle={}", swizzle);
return {};
}
@@ -757,7 +757,7 @@ VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reducti
case Tegra::Texture::SamplerReduction::Max:
return VK_SAMPLER_REDUCTION_MODE_MAX_EXT;
}
UNREACHABLE_MSG("Invalid sampler mode={}", static_cast<int>(reduction));
ASSERT_MSG(false, "Invalid sampler mode={}", static_cast<int>(reduction));
return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT;
}
@@ -780,7 +780,7 @@ VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode) {
case Tegra::Texture::MsaaMode::Msaa4x4:
return VK_SAMPLE_COUNT_16_BIT;
default:
UNREACHABLE_MSG("Invalid msaa_mode={}", static_cast<int>(msaa_mode));
ASSERT_MSG(false, "Invalid msaa_mode={}", static_cast<int>(msaa_mode));
return VK_SAMPLE_COUNT_1_BIT;
}
}

View File

@@ -46,7 +46,7 @@ size_t BytesPerIndex(VkIndexType index_type) {
case VK_INDEX_TYPE_UINT32:
return 4;
default:
UNREACHABLE_MSG("Invalid index type={}", index_type);
ASSERT_MSG(false, "Invalid index type={}", index_type);
return 1;
}
}
@@ -366,7 +366,7 @@ void BufferCacheRuntime::ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle
std::memcpy(staging_data, MakeQuadIndices<u32>(quad, first).data(), quad_size);
break;
default:
UNREACHABLE();
ASSERT(false);
break;
}
staging_data += quad_size;

View File

@@ -265,7 +265,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedInt:
return 2;
}
UNREACHABLE();
ASSERT(false);
return 2;
}();
const u32 input_size = num_vertices << index_shift;
@@ -333,7 +333,7 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
const VkImageMemoryBarrier image_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = is_initialized ? VK_ACCESS_SHADER_WRITE_BIT : VkAccessFlags{},
.srcAccessMask = is_initialized ? VK_ACCESS_SHADER_WRITE_BIT : VK_ACCESS_NONE,
.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
.oldLayout = is_initialized ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
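
VK_ACCESS_NONE is the named zero of VkAccessFlagBits, core since Vulkan 1.3 (previously VK_ACCESS_NONE_KHR from VK_KHR_synchronization2). Spelling it out instead of value-initializing VkAccessFlags{} keeps both arms of the ?: in the same enumerated type. The one assumption the substitution rests on, as a compile-time check:

#include <vulkan/vulkan.h> // requires Vulkan headers >= 1.3 for VK_ACCESS_NONE

static_assert(VK_ACCESS_NONE == 0,
              "VK_ACCESS_NONE must be the zero access mask");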

View File

@@ -174,7 +174,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
case Maxwell::TessellationPrimitive::Quads:
return Shader::TessPrimitive::Quads;
}
UNREACHABLE();
ASSERT(false);
return Shader::TessPrimitive::Triangles;
}();
info.tess_spacing = [&] {
@@ -187,7 +187,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
case Maxwell::TessellationSpacing::FractionalEven:
return Shader::TessSpacing::FractionalEven;
}
UNREACHABLE();
ASSERT(false);
return Shader::TessSpacing::Equal;
}();
break;

View File

@@ -263,7 +263,7 @@ StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(MemoryUsage
case MemoryUsage::Download:
return download_cache;
default:
UNREACHABLE_MSG("Invalid memory usage={}", usage);
ASSERT_MSG(false, "Invalid memory usage={}", usage);
return upload_cache;
}
}

View File

@@ -70,7 +70,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case ImageType::Buffer:
break;
}
UNREACHABLE_MSG("Invalid image type={}", type);
ASSERT_MSG(false, "Invalid image type={}", type);
return {};
}
@@ -87,7 +87,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case 16:
return VK_SAMPLE_COUNT_16_BIT;
default:
UNREACHABLE_MSG("Invalid number of samples={}", num_samples);
ASSERT_MSG(false, "Invalid number of samples={}", num_samples);
return VK_SAMPLE_COUNT_1_BIT;
}
}
@@ -107,7 +107,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
break;
default:
UNREACHABLE_MSG("Invalid surface type");
ASSERT_MSG(false, "Invalid surface type");
}
}
if (info.storage) {
@@ -179,7 +179,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case VideoCore::Surface::SurfaceType::DepthStencil:
return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
default:
UNREACHABLE_MSG("Invalid surface type");
ASSERT_MSG(false, "Invalid surface type");
return VkImageAspectFlags{};
}
}
@@ -221,7 +221,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case SwizzleSource::OneInt:
return VK_COMPONENT_SWIZZLE_ONE;
}
UNREACHABLE_MSG("Invalid swizzle={}", swizzle);
ASSERT_MSG(false, "Invalid swizzle={}", swizzle);
return VK_COMPONENT_SWIZZLE_ZERO;
}
@@ -242,10 +242,10 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case Shader::TextureType::ColorArrayCube:
return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case Shader::TextureType::Buffer:
UNREACHABLE_MSG("Texture buffers can't be image views");
ASSERT_MSG(false, "Texture buffers can't be image views");
return VK_IMAGE_VIEW_TYPE_1D;
}
UNREACHABLE_MSG("Invalid image view type={}", type);
ASSERT_MSG(false, "Invalid image view type={}", type);
return VK_IMAGE_VIEW_TYPE_2D;
}
@@ -269,10 +269,10 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
UNIMPLEMENTED_MSG("Rect image view");
return VK_IMAGE_VIEW_TYPE_2D;
case VideoCommon::ImageViewType::Buffer:
UNREACHABLE_MSG("Texture buffers can't be image views");
ASSERT_MSG(false, "Texture buffers can't be image views");
return VK_IMAGE_VIEW_TYPE_1D;
}
UNREACHABLE_MSG("Invalid image view type={}", type);
ASSERT_MSG(false, "Invalid image view type={}", type);
return VK_IMAGE_VIEW_TYPE_2D;
}
@@ -644,7 +644,7 @@ struct RangedBarrierRange {
case Shader::ImageFormat::R32G32B32A32_UINT:
return VK_FORMAT_R32G32B32A32_UINT;
}
UNREACHABLE_MSG("Invalid image format={}", format);
ASSERT_MSG(false, "Invalid image format={}", format);
return VK_FORMAT_R32_UINT;
}
@@ -1596,7 +1596,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
UNIMPLEMENTED();
break;
case VideoCommon::ImageViewType::Buffer:
UNREACHABLE();
ASSERT(false);
break;
}
}
@@ -1822,7 +1822,7 @@ void TextureCacheRuntime::AccelerateImageUpload(
if (IsPixelFormatASTC(image.info.format)) {
return astc_decoder_pass.Assemble(image, map, swizzles);
}
UNREACHABLE();
ASSERT(false);
}
} // namespace Vulkan

View File

@@ -280,7 +280,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
stage_index = 4;
break;
default:
UNREACHABLE_MSG("Invalid program={}", program);
ASSERT_MSG(false, "Invalid program={}", program);
break;
}
const u64 local_size{sph.LocalMemorySize()};

View File

@@ -29,7 +29,7 @@ SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_t
return SurfaceTarget::Texture2DArray;
default:
LOG_CRITICAL(HW_GPU, "Unimplemented texture_type={}", texture_type);
UNREACHABLE();
ASSERT(false);
return SurfaceTarget::Texture2D;
}
}
@@ -48,7 +48,7 @@ bool SurfaceTargetIsLayered(SurfaceTarget target) {
return true;
default:
LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", target);
UNREACHABLE();
ASSERT(false);
return false;
}
}
@@ -67,7 +67,7 @@ bool SurfaceTargetIsArray(SurfaceTarget target) {
return true;
default:
LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", target);
UNREACHABLE();
ASSERT(false);
return false;
}
}

View File

@@ -94,7 +94,7 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
resources.layers = 1;
break;
default:
UNREACHABLE_MSG("Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
ASSERT_MSG(false, "Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
break;
}
if (type != ImageType::Linear) {

View File

@@ -71,7 +71,7 @@ ImageViewInfo::ImageViewInfo(const TICEntry& config, s32 base_layer) noexcept
range.extent.layers = config.Depth() * 6;
break;
default:
UNREACHABLE_MSG("Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
ASSERT_MSG(false, "Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
break;
}
}

View File

@@ -23,7 +23,7 @@ namespace VideoCommon {
case 16:
return {2, 2};
}
UNREACHABLE_MSG("Invalid number of samples={}", num_samples);
ASSERT_MSG(false, "Invalid number of samples={}", num_samples);
return {1, 1};
}
@@ -47,7 +47,7 @@ namespace VideoCommon {
case MsaaMode::Msaa4x4:
return 16;
}
UNREACHABLE_MSG("Invalid MSAA mode={}", static_cast<int>(msaa_mode));
ASSERT_MSG(false, "Invalid MSAA mode={}", static_cast<int>(msaa_mode));
return 1;
}

View File

@@ -1485,14 +1485,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
const auto page_it = selected_page_table.find(page);
if (page_it == selected_page_table.end()) {
UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
return;
}
std::vector<ImageId>& image_ids = page_it->second;
const auto vector_it = std::ranges::find(image_ids, image_id);
if (vector_it == image_ids.end()) {
UNREACHABLE_MSG("Unregistering unregistered image in page=0x{:x}",
page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
page << PAGE_BITS);
return;
}
image_ids.erase(vector_it);
@@ -1504,14 +1504,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
const auto page_it = page_table.find(page);
if (page_it == page_table.end()) {
UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
return;
}
std::vector<ImageMapId>& image_map_ids = page_it->second;
const auto vector_it = std::ranges::find(image_map_ids, map_id);
if (vector_it == image_map_ids.end()) {
UNREACHABLE_MSG("Unregistering unregistered image in page=0x{:x}",
page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
page << PAGE_BITS);
return;
}
image_map_ids.erase(vector_it);
@@ -1532,7 +1532,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) {
const auto page_it = page_table.find(page);
if (page_it == page_table.end()) {
UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
return;
}
std::vector<ImageMapId>& image_map_ids = page_it->second;
@@ -1616,15 +1616,15 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
const GPUVAddr gpu_addr = image.gpu_addr;
const auto alloc_it = image_allocs_table.find(gpu_addr);
if (alloc_it == image_allocs_table.end()) {
UNREACHABLE_MSG("Trying to delete an image alloc that does not exist in address 0x{:x}",
gpu_addr);
ASSERT_MSG(false, "Trying to delete an image alloc that does not exist in address 0x{:x}",
gpu_addr);
return;
}
const ImageAllocId alloc_id = alloc_it->second;
std::vector<ImageId>& alloc_images = slot_image_allocs[alloc_id].images;
const auto alloc_image_it = std::ranges::find(alloc_images, image_id);
if (alloc_image_it == alloc_images.end()) {
UNREACHABLE_MSG("Trying to delete an image that does not exist");
ASSERT_MSG(false, "Trying to delete an image that does not exist");
return;
}
ASSERT_MSG(False(image.flags & ImageFlagBits::Tracked), "Image was not untracked");

Some files were not shown because too many files have changed in this diff.