Compare commits

..

25 Commits

Author SHA1 Message Date
Morph
5568763a57 vk_compute_pass: Use VK_ACCESS_NONE
This enumeration was introduced in Vulkan 1.3; prefer using it instead of defaulting the enum.

Also resolves a narrowing conversion warning on MSVC.
2022-06-14 09:14:13 -04:00
Mai
a3b12e3809 Merge pull request #8439 from liamwhite/monkey-compiler
general: fix compilation on GCC 12
2022-06-14 08:34:16 -04:00
Mai
dc47d0f624 Merge pull request #8459 from Morph1984/wextra-gcc
vk_compute_pass: Silence Wextra warning
2022-06-14 08:22:38 -04:00
Morph
fcfe192e83 vk_compute_pass: Silence Wextra warning
Silences a warning about using enumerated and non-enumerated types in a conditional expression.
2022-06-14 05:29:57 -04:00
Liam
bd38aefc57 kernel: fix passthrough of local captures in lambda 2022-06-13 20:09:32 -04:00
Liam
feaf010fa2 common/assert: rework ASSERT handling to avoid std::function usage 2022-06-13 20:09:32 -04:00
Liam
ebecdd3a74 general: fix compilation on MinGW GCC 12 2022-06-13 20:09:32 -04:00
Liam
a29ddcee40 common/assert: add unlikely 2022-06-13 20:09:32 -04:00
Liam
d11547024c general: fix compilation on GCC 12 2022-06-13 20:09:30 -04:00
Liam
6f59e2676b kernel: ensure class token lambda exit is unreachable 2022-06-13 20:09:00 -04:00
Liam
8fea7e56e5 kernel: fix inconsistency in AutoObjectTraits macro definitions 2022-06-13 20:09:00 -04:00
Liam
58fea44eb5 common: Don't test ASSERT conditions inline 2022-06-13 20:09:00 -04:00
Liam
084d7d6b01 common: Change semantics of UNREACHABLE to unconditionally crash 2022-06-13 20:09:00 -04:00
liamwhite
bd3bfe411d Merge pull request #8458 from lat9nq/no-constexpr-flow-block
structured_control_flow: Remove constexpr Flow::Block
2022-06-13 20:06:38 -04:00
lat9nq
963ed37fd6 structured_control_flow: Remove constexpr Flow::Block
This seems to be unsupported in newer libstdc++ versions due to
Flow::Block's base class being a non-literal type. It's not clear to me
why this was permitted in earlier versions.
2022-06-13 19:18:20 -04:00
bunnei
741da9c8bf Merge pull request #8388 from liamwhite/simpler-pause
CpuManager: simplify pausing
2022-06-13 15:48:03 -07:00
Morph
a0407a8e64 Merge pull request #8446 from liamwhite/cmd-gdb
core/debugger: support operation in yuzu-cmd
2022-06-13 14:38:37 -04:00
Morph
7582717c9d Merge pull request #8454 from liamwhite/inaddr-any
core/debugger: allow remote connections
2022-06-13 14:38:20 -04:00
bunnei
ec85eac3c9 Merge pull request #8443 from liamwhite/code-mem
kernel: fix KCodeMemory initialization
2022-06-13 11:32:27 -07:00
Liam
fb4b507ba4 core/debugger: allow remote connections 2022-06-12 11:50:50 -04:00
Liam
c3cc65a11e yuzu-cmd: ignore bogus timeouts from SDL 2022-06-10 12:49:18 -04:00
Liam
1f0fee33ed core/debugger: fix a number of shutdown deadlocks 2022-06-10 09:17:12 -04:00
Liam
de6c0defb3 core/debugger: support operation in yuzu-cmd 2022-06-10 09:11:02 -04:00
Liam
6c659c3a16 kernel: fix KCodeMemory initialization 2022-06-09 12:33:28 -04:00
Liam
af022294dd CpuManager: simplify pausing 2022-06-08 21:47:29 -04:00
98 changed files with 477 additions and 354 deletions

View File

@@ -40,6 +40,11 @@ target_include_directories(mbedtls PUBLIC ./mbedtls/include)
add_library(microprofile INTERFACE)
target_include_directories(microprofile INTERFACE ./microprofile)
# GCC bugs
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "12" AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND MINGW)
target_compile_options(microprofile INTERFACE "-Wno-array-bounds")
endif()
# libusb
if (NOT LIBUSB_FOUND OR YUZU_USE_BUNDLED_LIBUSB)
add_subdirectory(libusb)

View File

@@ -429,7 +429,7 @@ void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, Vo
in_params.node_id);
break;
default:
UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
}
}
}
@@ -1312,7 +1312,7 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, std::s
samples_to_read - samples_read, channel, temp_mix_offset);
break;
default:
UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
}
temp_mix_offset += samples_decoded;

View File

@@ -50,7 +50,7 @@ EffectBase* EffectContext::RetargetEffect(std::size_t i, EffectType effect) {
effects[i] = std::make_unique<EffectBiquadFilter>();
break;
default:
UNREACHABLE_MSG("Unimplemented effect {}", effect);
ASSERT_MSG(false, "Unimplemented effect {}", effect);
effects[i] = std::make_unique<EffectStubbed>();
}
return GetInfo(i);
@@ -104,7 +104,7 @@ void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
auto& params = GetParams();
const auto* reverb_params = reinterpret_cast<I3dl2ReverbParams*>(in_params.raw.data());
if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
UNREACHABLE_MSG("Invalid reverb max channel count {}", reverb_params->max_channels);
ASSERT_MSG(false, "Invalid reverb max channel count {}", reverb_params->max_channels);
return;
}

View File

@@ -483,7 +483,7 @@ bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
// Add more work
index_stack.push(j);
} else if (node_state == NodeStates::State::InFound) {
UNREACHABLE_MSG("Node start marked as found");
ASSERT_MSG(false, "Node start marked as found");
ResetState();
return false;
}

View File

@@ -114,7 +114,7 @@ void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
in_params.current_playstate = ServerPlayState::Play;
break;
default:
UNREACHABLE_MSG("Unknown playstate {}", voice_in.play_state);
ASSERT_MSG(false, "Unknown playstate {}", voice_in.play_state);
break;
}
@@ -410,7 +410,7 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
return in_params.should_depop;
}
default:
UNREACHABLE_MSG("Invalid playstate {}", in_params.current_playstate);
ASSERT_MSG(false, "Invalid playstate {}", in_params.current_playstate);
}
return false;

View File

@@ -6,8 +6,13 @@
#include "common/settings.h"
void assert_handle_failure() {
void assert_fail_impl() {
if (Settings::values.use_debug_asserts) {
Crash();
}
}
[[noreturn]] void unreachable_impl() {
Crash();
throw std::runtime_error("Unreachable code");
}

View File

@@ -9,44 +9,43 @@
// Sometimes we want to try to continue even after hitting an assert.
// However touching this file yields a global recompilation as this header is included almost
// everywhere. So let's just move the handling of the failed assert to a single cpp file.
void assert_handle_failure();
// For asserts we'd like to keep all the junk executed when an assert happens away from the
// important code in the function. One way of doing this is to put all the relevant code inside a
// lambda and force the compiler to not inline it. Unfortunately, MSVC seems to have no syntax to
// specify __declspec on lambda functions, so what we do instead is define a noinline wrapper
// template that calls the lambda. This seems to generate an extra instruction at the call-site
// compared to the ideal implementation (which wouldn't support ASSERT_MSG parameters), but is good
// enough for our purposes.
template <typename Fn>
#if defined(_MSC_VER)
[[msvc::noinline]]
#elif defined(__GNUC__)
[[gnu::cold, gnu::noinline]]
void assert_fail_impl();
[[noreturn]] void unreachable_impl();
#ifdef _MSC_VER
#define YUZU_NO_INLINE __declspec(noinline)
#else
#define YUZU_NO_INLINE __attribute__((noinline))
#endif
static void
assert_noinline_call(const Fn& fn) {
fn();
assert_handle_failure();
}
#define ASSERT(_a_) \
do \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
([&]() YUZU_NO_INLINE { \
if (!(_a_)) [[unlikely]] { \
LOG_CRITICAL(Debug, "Assertion Failed!"); \
assert_fail_impl(); \
} \
while (0)
}())
#define ASSERT_MSG(_a_, ...) \
do \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
([&]() YUZU_NO_INLINE { \
if (!(_a_)) [[unlikely]] { \
LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); \
assert_fail_impl(); \
} \
while (0)
}())
#define UNREACHABLE() \
do { \
LOG_CRITICAL(Debug, "Unreachable code!"); \
unreachable_impl(); \
} while (0)
#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
#define UNREACHABLE_MSG(...) \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
do { \
LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); \
unreachable_impl(); \
} while (0)
#ifdef _DEBUG
#define DEBUG_ASSERT(_a_) ASSERT(_a_)
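
Condensed, the reworked assert header in the hunk above boils down to the shape below. This is a simplified sketch rather than the verbatim yuzu code: NO_INLINE stands in for YUZU_NO_INLINE and fputs for the LOG_CRITICAL macro. The point of the rework is that the condition check and the cold failure path live in an immediately-invoked, never-inlined lambda (so no std::function is constructed), the failure branch is tagged [[unlikely]], and UNREACHABLE() now funnels into a [[noreturn]] function that always crashes.

#include <cstdio>
#include <cstdlib>

#ifdef _MSC_VER
#define NO_INLINE __declspec(noinline)
#else
#define NO_INLINE __attribute__((noinline))
#endif

inline void assert_fail_impl() {
    // In yuzu this crashes only when debug asserts are enabled, so execution may continue.
}

[[noreturn]] inline void unreachable_impl() {
    std::abort(); // always fatal
}

#define ASSERT(cond)                                    \
    ([&]() NO_INLINE {                                  \
        if (!(cond)) [[unlikely]] {                     \
            std::fputs("Assertion Failed!\n", stderr);  \
            assert_fail_impl();                         \
        }                                               \
    }())

#define UNREACHABLE()                                   \
    do {                                                \
        std::fputs("Unreachable code!\n", stderr);      \
        unreachable_impl();                             \
    } while (0)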

View File

@@ -147,7 +147,7 @@ void UpdateRescalingInfo() {
info.down_shift = 0;
break;
default:
UNREACHABLE();
ASSERT(false);
info.up_scale = 1;
info.down_shift = 0;
}

View File

@@ -493,6 +493,12 @@ void System::Shutdown() {
impl->Shutdown();
}
void System::DetachDebugger() {
if (impl->debugger) {
impl->debugger->NotifyShutdown();
}
}
std::unique_lock<std::mutex> System::StallCPU() {
return impl->StallCPU();
}

View File

@@ -160,6 +160,9 @@ public:
/// Shutdown the emulated system.
void Shutdown();
/// Forcibly detach the debugger if it is running.
void DetachDebugger();
std::unique_lock<std::mutex> StallCPU();
void UnstallCPU();

View File

@@ -16,7 +16,8 @@
namespace Core {
CpuManager::CpuManager(System& system_) : system{system_} {}
CpuManager::CpuManager(System& system_)
: pause_barrier{std::make_unique<Common::Barrier>(1)}, system{system_} {}
CpuManager::~CpuManager() = default;
void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
@@ -30,8 +31,10 @@ void CpuManager::Initialize() {
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
}
pause_barrier = std::make_unique<Common::Barrier>(Core::Hardware::NUM_CPU_CORES + 1);
} else {
core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
pause_barrier = std::make_unique<Common::Barrier>(2);
}
}
@@ -138,51 +141,14 @@ void CpuManager::MultiCoreRunSuspendThread() {
auto core = kernel.CurrentPhysicalCoreIndex();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.CurrentPhysicalCoreIndex());
scheduler.RescheduleCurrentCore();
}
}
void CpuManager::MultiCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = true;
for (const auto& data : core_data) {
all_not_barrier &= !data.is_running.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.enter_barrier->Set();
}
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.exit_barrier->Set();
}
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
/// Don't release the barrier
}
paused_state = paused;
}
///////////////////////////////////////////////////////////////////////////////
/// SingleCore ///
///////////////////////////////////////////////////////////////////////////////
@@ -235,8 +201,9 @@ void CpuManager::SingleCoreRunSuspendThread() {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
scheduler.RescheduleCurrentCore();
}
@@ -274,37 +241,21 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
}
}
void CpuManager::SingleCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
}
core_data[0].enter_barrier->Set();
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
core_data[0].exit_barrier->Set();
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
/// Don't release the barrier
}
paused_state = paused;
}
void CpuManager::Pause(bool paused) {
if (is_multicore) {
MultiCorePause(paused);
} else {
SingleCorePause(paused);
std::scoped_lock lk{pause_lock};
if (pause_state == paused) {
return;
}
// Set the new state
pause_state.store(paused);
// Wake up any waiting threads
pause_state.notify_all();
// Wait for all threads to successfully change state before returning
pause_barrier->Sync();
}
void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
@@ -320,27 +271,29 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
Common::SetCurrentThreadName(name.c_str());
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
auto& data = core_data[core];
data.enter_barrier = std::make_unique<Common::Event>();
data.exit_barrier = std::make_unique<Common::Event>();
data.host_context = Common::Fiber::ThreadToFiber();
data.is_running = false;
data.initialized = true;
const bool sc_sync = !is_async_gpu && !is_multicore;
bool sc_sync_first_use = sc_sync;
// Cleanup
SCOPE_EXIT({
data.host_context->Exit();
data.enter_barrier.reset();
data.exit_barrier.reset();
data.initialized = false;
MicroProfileOnThreadExit();
});
/// Running
while (running_mode) {
data.is_running = false;
data.enter_barrier->Wait();
if (pause_state.load(std::memory_order_relaxed)) {
// Wait for caller to acknowledge pausing
pause_barrier->Sync();
// Wait until unpaused
pause_state.wait(true, std::memory_order_relaxed);
// Wait for caller to acknowledge unpausing
pause_barrier->Sync();
}
if (sc_sync_first_use) {
system.GPU().ObtainContext();
sc_sync_first_use = false;
@@ -352,12 +305,7 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
}
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
data.is_running = true;
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
data.is_running = false;
data.is_paused = true;
data.exit_barrier->Wait();
data.is_paused = false;
}
}
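
The replacement pause protocol in this hunk is small enough to sketch in full. In the sketch below, std::barrier stands in for yuzu's Common::Barrier (whose Sync() is used the same way above), and the participant count is illustrative; the real Pause also holds pause_lock and early-outs when the state is unchanged.

#include <atomic>
#include <barrier>

std::barrier<> sync_point{1 + 4};     // the pausing caller plus four core threads (illustrative)
std::atomic<bool> pause_state{false};

// Requesting side (CpuManager::Pause above, minus the mutex and early-out).
void Pause(bool paused) {
    pause_state.store(paused);
    pause_state.notify_all();         // wake any core sleeping in wait(true)
    sync_point.arrive_and_wait();     // return only after every core acknowledged the change
}

// Top of each core thread's loop iteration (CpuManager::RunThread above).
void CoreLoopIteration() {
    if (pause_state.load(std::memory_order_relaxed)) {
        sync_point.arrive_and_wait();                      // acknowledge pausing
        pause_state.wait(true, std::memory_order_relaxed); // sleep until unpaused
        sync_point.arrive_and_wait();                      // acknowledge unpausing
    }
    // ... yield into the guest fiber as before ...
}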

View File

@@ -69,13 +69,11 @@ private:
void MultiCoreRunGuestLoop();
void MultiCoreRunIdleThread();
void MultiCoreRunSuspendThread();
void MultiCorePause(bool paused);
void SingleCoreRunGuestThread();
void SingleCoreRunGuestLoop();
void SingleCoreRunIdleThread();
void SingleCoreRunSuspendThread();
void SingleCorePause(bool paused);
static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
@@ -83,16 +81,13 @@ private:
struct CoreData {
std::shared_ptr<Common::Fiber> host_context;
std::unique_ptr<Common::Event> enter_barrier;
std::unique_ptr<Common::Event> exit_barrier;
std::atomic<bool> is_running;
std::atomic<bool> is_paused;
std::atomic<bool> initialized;
std::jthread host_thread;
};
std::atomic<bool> running_mode{};
std::atomic<bool> paused_state{};
std::atomic<bool> pause_state{};
std::unique_ptr<Common::Barrier> pause_barrier{};
std::mutex pause_lock{};
std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};

View File

@@ -140,7 +140,6 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
return 0x3C;
}
UNREACHABLE();
return 0;
}
u64 GetSignatureTypePaddingSize(SignatureType type) {
@@ -155,7 +154,6 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
return 0x40;
}
UNREACHABLE();
return 0;
}
SignatureType Ticket::GetSignatureType() const {
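
A side effect of UNREACHABLE() now ending in a [[noreturn]] function shows up in this hunk: the dead return 0; statements after it can simply be dropped without provoking a missing-return warning. A minimal stand-alone illustration (hypothetical function, not the ticket code):

#include <cstdint>
#include <cstdlib>

[[noreturn]] inline void unreachable_impl() {
    std::abort();
}

std::uint64_t GetDataSize(int signature_type) {
    switch (signature_type) {
    case 0:
        return 0x200;
    case 1:
        return 0x100;
    default:
        unreachable_impl(); // [[noreturn]]: the compiler knows control stops here
    }
    // No trailing "return 0;" is required, and -Wreturn-type stays quiet.
}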

View File

@@ -42,6 +42,16 @@ static std::span<const u8> ReceiveInto(Readable& r, Buffer& buffer) {
return received_data;
}
enum class SignalType {
Stopped,
ShuttingDown,
};
struct SignalInfo {
SignalType type;
Kernel::KThread* thread;
};
namespace Core {
class DebuggerImpl : public DebuggerBackend {
@@ -56,7 +66,7 @@ public:
ShutdownServer();
}
bool NotifyThreadStopped(Kernel::KThread* thread) {
bool SignalDebugger(SignalInfo signal_info) {
std::scoped_lock lk{connection_lock};
if (stopped) {
@@ -64,9 +74,13 @@ public:
// It should be ignored.
return false;
}
stopped = true;
boost::asio::write(signal_pipe, boost::asio::buffer(&thread, sizeof(thread)));
// Set up the state.
stopped = true;
info = signal_info;
// Write a single byte into the pipe to wake up the debug interface.
boost::asio::write(signal_pipe, boost::asio::buffer(&stopped, sizeof(stopped)));
return true;
}
@@ -96,7 +110,7 @@ private:
connection_thread = std::jthread([&, port](std::stop_token stop_token) {
try {
// Initialize the listening socket and accept a new client.
tcp::endpoint endpoint{boost::asio::ip::address_v4::loopback(), port};
tcp::endpoint endpoint{boost::asio::ip::address_v4::any(), port};
tcp::acceptor acceptor{io_context, endpoint};
acceptor.async_accept(client_socket, [](const auto&) {});
@@ -124,7 +138,7 @@ private:
Common::SetCurrentThreadName("yuzu:Debugger");
// Set up the client signals for new data.
AsyncReceiveInto(signal_pipe, active_thread, [&](auto d) { PipeData(d); });
AsyncReceiveInto(signal_pipe, pipe_data, [&](auto d) { PipeData(d); });
AsyncReceiveInto(client_socket, client_data, [&](auto d) { ClientData(d); });
// Stop the emulated CPU.
@@ -142,9 +156,28 @@ private:
}
void PipeData(std::span<const u8> data) {
AllCoreStop();
UpdateActiveThread();
frontend->Stopped(active_thread);
switch (info.type) {
case SignalType::Stopped:
// Stop emulation.
AllCoreStop();
// Notify the client.
active_thread = info.thread;
UpdateActiveThread();
frontend->Stopped(active_thread);
break;
case SignalType::ShuttingDown:
frontend->ShuttingDown();
// Wait for emulation to shut down gracefully now.
suspend.reset();
signal_pipe.close();
client_socket.shutdown(boost::asio::socket_base::shutdown_both);
LOG_INFO(Debug_GDBStub, "Shut down server");
break;
}
}
void ClientData(std::span<const u8> data) {
@@ -246,7 +279,9 @@ private:
boost::asio::ip::tcp::socket client_socket;
std::optional<std::unique_lock<std::mutex>> suspend;
SignalInfo info;
Kernel::KThread* active_thread;
bool pipe_data;
bool stopped;
std::array<u8, 4096> client_data;
@@ -263,7 +298,13 @@ Debugger::Debugger(Core::System& system, u16 port) {
Debugger::~Debugger() = default;
bool Debugger::NotifyThreadStopped(Kernel::KThread* thread) {
return impl && impl->NotifyThreadStopped(thread);
return impl && impl->SignalDebugger(SignalInfo{SignalType::Stopped, thread});
}
void Debugger::NotifyShutdown() {
if (impl) {
impl->SignalDebugger(SignalInfo{SignalType::ShuttingDown, nullptr});
}
}
} // namespace Core
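
The wake-up mechanism above is the classic self-pipe pattern: the byte written into signal_pipe carries no information by itself, it only unblocks the debugger thread's pending read, and the stashed SignalInfo decides what to do. A hedged, boost-free sketch of the same idea using a plain POSIX pipe and hypothetical names:

#include <unistd.h> // pipe, read, write
#include <cstdint>

enum class SignalType { Stopped, ShuttingDown };
struct SignalInfo {
    SignalType type;
    void* thread; // stands in for Kernel::KThread*
};

static int pipe_fds[2];   // call pipe(pipe_fds) once at startup
static SignalInfo info{}; // the real code guards this with connection_lock

// Producer side (SignalDebugger above): stash the payload, then write one byte.
void SignalDebugger(SignalInfo signal_info) {
    info = signal_info;
    const std::uint8_t wake = 1;
    (void)write(pipe_fds[1], &wake, sizeof(wake));
}

// Consumer side (PipeData above): inspect the stashed info, not the byte.
void DebuggerThread() {
    std::uint8_t byte{};
    while (read(pipe_fds[0], &byte, sizeof(byte)) == 1) {
        switch (info.type) {
        case SignalType::Stopped:
            // halt the cores, then report the stopped thread to the frontend
            break;
        case SignalType::ShuttingDown:
            // close the pipe and socket; the debugger thread exits
            return;
        }
    }
}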

View File

@@ -35,6 +35,11 @@ public:
*/
bool NotifyThreadStopped(Kernel::KThread* thread);
/**
* Notify the debugger that a shutdown is being performed now and disconnect.
*/
void NotifyShutdown();
private:
std::unique_ptr<DebuggerImpl> impl;
};

View File

@@ -66,6 +66,11 @@ public:
*/
virtual void Stopped(Kernel::KThread* thread) = 0;
/**
* Called when emulation is shutting down.
*/
virtual void ShuttingDown() = 0;
/**
* Called when new data is asynchronously received on the client socket.
* A list of actions to perform is returned.

View File

@@ -106,6 +106,8 @@ GDBStub::~GDBStub() = default;
void GDBStub::Connected() {}
void GDBStub::ShuttingDown() {}
void GDBStub::Stopped(Kernel::KThread* thread) {
SendReply(arch->ThreadStatus(thread, GDB_STUB_SIGTRAP));
}

View File

@@ -23,6 +23,7 @@ public:
void Connected() override;
void Stopped(Kernel::KThread* thread) override;
void ShuttingDown() override;
std::vector<DebuggerAction> ClientData(std::span<const u8> data) override;
private:

View File

@@ -419,7 +419,7 @@ std::optional<Core::Crypto::Key128> NCA::GetKeyAreaKey(NCASectionCryptoType type
Core::Crypto::Mode::ECB);
cipher.Transcode(key_area.data(), key_area.size(), key_area.data(), Core::Crypto::Op::Decrypt);
Core::Crypto::Key128 out;
Core::Crypto::Key128 out{};
if (type == NCASectionCryptoType::XTS) {
std::copy(key_area.begin(), key_area.begin() + 0x10, out.begin());
} else if (type == NCASectionCryptoType::CTR || type == NCASectionCryptoType::BKTR) {

View File

@@ -50,7 +50,7 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
low = mid + 1;
}
}
UNREACHABLE_MSG("Offset could not be found in BKTR block.");
ASSERT_MSG(false, "Offset could not be found in BKTR block.");
return {0, 0};
}
} // Anonymous namespace

View File

@@ -108,7 +108,7 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
// TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
return ContentRecordType::HtmlDocument;
default:
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type);
ASSERT_MSG(false, "Invalid NCAContentType={:02X}", type);
return ContentRecordType{};
}
}

View File

@@ -144,7 +144,7 @@ VirtualFile RealVfsFilesystem::MoveFile(std::string_view old_path_, std::string_
LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", new_path);
}
} else {
UNREACHABLE();
ASSERT(false);
return nullptr;
}

View File

@@ -65,7 +65,7 @@ void DefaultControllerApplet::ReconfigureControllers(std::function<void()> callb
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::Handheld);
controller->Connect(true);
} else {
UNREACHABLE_MSG("Unable to add a new controller based on the given parameters!");
ASSERT_MSG(false, "Unable to add a new controller based on the given parameters!");
}
}

View File

@@ -48,7 +48,7 @@ EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type) {
return handheld.get();
case NpadIdType::Invalid:
default:
UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
return nullptr;
}
}
@@ -77,7 +77,7 @@ const EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type
return handheld.get();
case NpadIdType::Invalid:
default:
UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
return nullptr;
}
}

View File

@@ -141,7 +141,7 @@ public:
if (index < DomainHandlerCount()) {
domain_handlers[index] = nullptr;
} else {
UNREACHABLE_MSG("Unexpected handler index {}", index);
ASSERT_MSG(false, "Unexpected handler index {}", index);
}
}

View File

@@ -244,7 +244,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
// If we somehow get an invalid type, abort.
default:
UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
ASSERT_MSG(false, "Unknown slab type: {}", slab_types[i]);
}
// If we've hit the end of a gap, free it.

View File

@@ -35,7 +35,7 @@ public:
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
}
UNREACHABLE();
ASSERT(false);
return ResultUnknown;
}
@@ -49,7 +49,7 @@ public:
case Svc::ArbitrationType::WaitIfEqual:
return WaitIfEqual(addr, value, timeout);
}
UNREACHABLE();
ASSERT(false);
return ResultUnknown;
}

View File

@@ -84,7 +84,7 @@ u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
}
UNREACHABLE();
ASSERT(false);
return 0;
}
@@ -101,7 +101,7 @@ std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
ASSERT(IsAllowed39BitType(type));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
}
UNREACHABLE();
ASSERT(false);
return 0;
}

View File

@@ -18,7 +18,7 @@ namespace Kernel {
class KernelCore;
class KProcess;
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
\
private: \
friend class ::Kernel::KClassTokenGenerator; \
@@ -40,16 +40,19 @@ public:
static constexpr const char* GetStaticTypeName() { \
return TypeName; \
} \
virtual TypeObj GetTypeObj() const { \
virtual TypeObj GetTypeObj() ATTRIBUTE { \
return GetStaticTypeObj(); \
} \
virtual const char* GetTypeName() const { \
virtual const char* GetTypeName() ATTRIBUTE { \
return GetStaticTypeName(); \
} \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
class KAutoObject {
protected:
class TypeObj {
@@ -82,7 +85,7 @@ protected:
};
private:
KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
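
The macro split above exists because the same body is stamped into both the root class and every derived class, so the virtual-function qualifiers have to be a parameter: KAutoObject declares the virtuals with plain const, everything else overrides them with const override, presumably to keep override-consistency warnings quiet. An illustrative miniature (not the real macro):

#define TRAITS_IMPL(NAME, ATTRIBUTE)            \
    virtual const char* GetTypeName() ATTRIBUTE \
    {                                           \
        return NAME;                            \
    }

struct Base {
    TRAITS_IMPL("Base", const) // first declaration of the virtual
    virtual ~Base() = default;
};

struct Derived : Base {
    TRAITS_IMPL("Derived", const override) // overriding declaration
};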

View File

@@ -49,6 +49,7 @@ private:
}
}
}
UNREACHABLE();
}();
template <typename T>
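
For context on the UNREACHABLE() added above (commit 6f59e2676b): this appears to be the tail of an immediately-invoked, value-returning lambda whose loops always return before falling off the end. The compiler cannot prove that, so GCC 12 warns (and with warnings treated as errors, fails) about control reaching the end of the lambda; marking the exit unreachable settles it. A self-contained illustration with a stand-in for the macro:

#include <cstdlib>

[[noreturn]] inline void Unreachable() { // stand-in for yuzu's UNREACHABLE()
    std::abort();
}

constexpr int first_multiple_of_three = [] {
    for (int i = 1; i < 100; ++i) {
        if (i % 3 == 0) {
            return i;
        }
    }
    Unreachable(); // never evaluated, but it proves the lambda cannot fall off the end
}();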

View File

@@ -27,23 +27,18 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
auto& page_table = m_owner->PageTable();
// Construct the page group.
m_page_group =
KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
m_page_group = {};
// Lock the memory.
R_TRY(page_table.LockForCodeMemory(addr, size))
R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
// Clear the memory.
//
// FIXME: this ends up clobbering address ranges outside the scope of the mapping within
// guest memory, and is not specifically required if the guest program is correctly
// written, so disable until this is further investigated.
//
// for (const auto& block : m_page_group.Nodes()) {
// std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
// }
for (const auto& block : m_page_group.Nodes()) {
std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
}
// Set remaining tracking members.
m_owner->Open();
m_address = addr;
m_is_initialized = true;
m_is_owner_mapped = false;
@@ -57,8 +52,14 @@ void KCodeMemory::Finalize() {
// Unlock.
if (!m_is_mapped && !m_is_owner_mapped) {
const size_t size = m_page_group.GetNumPages() * PageSize;
m_owner->PageTable().UnlockForCodeMemory(m_address, size);
m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
}
// Close the page group.
m_page_group = {};
// Close our reference to our owner.
m_owner->Close();
}
ResultCode KCodeMemory::Map(VAddr address, size_t size) {
@@ -118,7 +119,8 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
k_perm = KMemoryPermission::UserReadExecute;
break;
default:
break;
// Already validated by ControlCodeMemory svc
UNREACHABLE();
}
// Map the memory.

View File

@@ -29,7 +29,7 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
} else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
return KMemoryManager::Pool::SystemNonSecure;
} else {
UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
return {};
}
}

View File

@@ -35,7 +35,7 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
case FileSys::ProgramAddressSpaceType::Is39Bit:
return 39;
default:
UNREACHABLE();
ASSERT(false);
return {};
}
}
@@ -128,7 +128,7 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
const std::size_t needed_size{
(alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
if (alloc_size < needed_size) {
UNREACHABLE();
ASSERT(false);
return ResultOutOfMemory;
}
@@ -542,6 +542,95 @@ ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num
return ResultSuccess;
}
bool KPageTable::IsValidPageGroup(const KPageLinkedList& pg_ll, VAddr addr, size_t num_pages) {
ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
const auto& pg = pg_ll.Nodes();
const auto& memory_layout = system.Kernel().MemoryLayout();
// Empty groups are necessarily invalid.
if (pg.empty()) {
return false;
}
// We're going to validate that the group we'd expect is the group we see.
auto cur_it = pg.begin();
PAddr cur_block_address = cur_it->GetAddress();
size_t cur_block_pages = cur_it->GetNumPages();
auto UpdateCurrentIterator = [&]() {
if (cur_block_pages == 0) {
if ((++cur_it) == pg.end()) {
return false;
}
cur_block_address = cur_it->GetAddress();
cur_block_pages = cur_it->GetNumPages();
}
return true;
};
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
return false;
}
// Prepare tracking variables.
PAddr cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
size_t tot_size = cur_size;
// Iterate, comparing expected to actual.
while (tot_size < size) {
if (!page_table_impl.ContinueTraversal(next_entry, context)) {
return false;
}
if (next_entry.phys_addr != (cur_addr + cur_size)) {
const size_t cur_pages = cur_size / PageSize;
if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
return false;
}
if (!UpdateCurrentIterator()) {
return false;
}
if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
return false;
}
cur_block_address += cur_size;
cur_block_pages -= cur_pages;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
// Ensure we compare the right amount for the last block.
if (tot_size > size) {
cur_size -= (tot_size - size);
}
if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
return false;
}
if (!UpdateCurrentIterator()) {
return false;
}
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
KScopedLightLock lk(general_lock);
@@ -1341,7 +1430,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
new_state = KMemoryState::AliasCodeData;
break;
default:
UNREACHABLE();
ASSERT(false);
}
}
@@ -1687,22 +1776,22 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
return ResultSuccess;
}
ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
ResultCode KPageTable::LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size) {
return this->LockMemoryAndOpen(
nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
KMemoryAttribute::All, KMemoryAttribute::None,
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
KMemoryAttribute::None,
static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
KMemoryPermission::KernelReadWrite),
KMemoryAttribute::Locked);
}
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
KMemoryAttribute::Locked, nullptr);
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size,
const KPageLinkedList& pg) {
return this->UnlockMemory(
addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
}
ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
@@ -1734,9 +1823,7 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
VAddr addr{start};
while (addr < start + (num_pages * PageSize)) {
const PAddr paddr{GetPhysicalAddr(addr)};
if (!paddr) {
UNREACHABLE();
}
ASSERT(paddr != 0);
page_linked_list.AddBlock(paddr, 1);
addr += PageSize;
}
@@ -1767,7 +1854,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
break;
default:
UNREACHABLE();
ASSERT(false);
}
addr += size;
@@ -1798,7 +1885,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss
case OperationType::ChangePermissionsAndRefresh:
break;
default:
UNREACHABLE();
ASSERT(false);
}
return ResultSuccess;
}
@@ -1835,7 +1922,6 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
return code_region_start;
default:
UNREACHABLE();
return {};
}
}
@@ -1871,7 +1957,6 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
return code_region_end - code_region_start;
default:
UNREACHABLE();
return {};
}
}
@@ -2125,7 +2210,7 @@ ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_
// Check the page group.
if (pg != nullptr) {
UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
}
// Decide on new perm and attr.

View File

@@ -72,8 +72,8 @@ public:
KMemoryPermission perm, PAddr map_addr = 0);
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
ResultCode LockForCodeMemory(KPageLinkedList* out, VAddr addr, std::size_t size);
ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageLinkedList& pg);
ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
KMemoryState state_mask, KMemoryState state,
KMemoryPermission perm_mask, KMemoryPermission perm,
@@ -178,6 +178,7 @@ private:
const KPageLinkedList* pg);
ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
bool IsValidPageGroup(const KPageLinkedList& pg, VAddr addr, size_t num_pages);
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();

View File

@@ -60,7 +60,7 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
session_ptr->ClientConnected(server.AcceptSession());
} else {
UNREACHABLE();
ASSERT(false);
}
return ResultSuccess;

View File

@@ -350,7 +350,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
break;
default:
UNREACHABLE();
ASSERT(false);
}
// Create TLS region

View File

@@ -97,13 +97,13 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
"object_id {} is too big! This probably means a recent service call "
"to {} needed to return a new interface!",
object_id, name);
UNREACHABLE();
ASSERT(false);
return ResultSuccess; // Ignore error if asserts are off
}
if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
return strong_ptr->HandleSyncRequest(*this, context);
} else {
UNREACHABLE();
ASSERT(false);
return ResultSuccess;
}

View File

@@ -133,7 +133,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
UNIMPLEMENTED();
break;
default:
UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
break;
}
thread_type = type;

View File

@@ -212,7 +212,9 @@ struct KernelCore::Impl {
system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes();
const auto sizes{memory_layout->GetTotalAndKernelMemorySizes()};
const auto total_size{sizes.first};
const auto kernel_size{sizes.second};
// If setting the default system values fails, then something seriously wrong has occurred.
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
@@ -252,6 +254,7 @@ struct KernelCore::Impl {
core_id)
.IsSuccess());
suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
suspend_threads[core_id]->DisableDispatch();
}
}
@@ -1073,9 +1076,6 @@ void KernelCore::Suspend(bool in_suspention) {
impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended);
if (!should_suspend) {
impl->suspend_threads[core_id]->DisableDispatch();
}
}
}
}

View File

@@ -1876,7 +1876,7 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
KScheduler::YieldToAnyThread(kernel);
} else {
// Nintendo does nothing at all if an otherwise invalid value is passed.
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
}

View File

@@ -178,7 +178,7 @@ ResultCode Controller::GetStatus() const {
}
void Controller::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void Controller::Execute() {

View File

@@ -156,7 +156,7 @@ ResultCode Error::GetStatus() const {
}
void Error::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data!");
ASSERT_MSG(false, "Unexpected interactive applet data!");
}
void Error::Execute() {

View File

@@ -76,7 +76,7 @@ ResultCode Auth::GetStatus() const {
}
void Auth::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data.");
ASSERT_MSG(false, "Unexpected interactive applet data.");
}
void Auth::Execute() {
@@ -175,7 +175,7 @@ ResultCode PhotoViewer::GetStatus() const {
}
void PhotoViewer::ExecuteInteractive() {
UNREACHABLE_MSG("Unexpected interactive applet data.");
ASSERT_MSG(false, "Unexpected interactive applet data.");
}
void PhotoViewer::Execute() {

View File

@@ -67,7 +67,7 @@ ResultCode MiiEdit::GetStatus() const {
}
void MiiEdit::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void MiiEdit::Execute() {

View File

@@ -44,7 +44,7 @@ ResultCode ProfileSelect::GetStatus() const {
}
void ProfileSelect::ExecuteInteractive() {
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
}
void ProfileSelect::Execute() {

View File

@@ -71,7 +71,7 @@ void SoftwareKeyboard::Initialize() {
InitializeBackground(applet_mode);
break;
default:
UNREACHABLE_MSG("Invalid LibraryAppletMode={}", applet_mode);
ASSERT_MSG(false, "Invalid LibraryAppletMode={}", applet_mode);
break;
}
}

View File

@@ -279,7 +279,7 @@ void WebBrowser::Initialize() {
InitializeLobby();
break;
default:
UNREACHABLE_MSG("Invalid ShimKind={}", web_arg_header.shim_kind);
ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
break;
}
}
@@ -320,7 +320,7 @@ void WebBrowser::Execute() {
ExecuteLobby();
break;
default:
UNREACHABLE_MSG("Invalid ShimKind={}", web_arg_header.shim_kind);
ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
WebBrowserExit(WebExitReason::EndButtonPressed);
break;
}

View File

@@ -899,7 +899,7 @@ void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
case FileSys::SaveDataSpaceId::TemporaryStorage:
case FileSys::SaveDataSpaceId::ProperSystem:
case FileSys::SaveDataSpaceId::SafeMode:
UNREACHABLE();
ASSERT(false);
}
auto filesystem = std::make_shared<IFileSystem>(system, std::move(dir.Unwrap()),

View File

@@ -160,7 +160,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
shared_memory->system_properties.raw = 0;
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
shared_memory->style_tag.fullkey.Assign(1);
@@ -422,7 +422,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
libnx_state.connection_status.is_connected.Assign(1);
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
case Core::HID::NpadStyleIndex::NES:
@@ -597,7 +597,7 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing
switch (controller_type) {
case Core::HID::NpadStyleIndex::None:
UNREACHABLE();
ASSERT(false);
break;
case Core::HID::NpadStyleIndex::ProController:
case Core::HID::NpadStyleIndex::Pokeball:
@@ -856,7 +856,7 @@ void Controller_NPad::VibrateController(
}
if (vibration_device_handle.device_index == Core::HID::DeviceIndex::None) {
UNREACHABLE_MSG("DeviceIndex should never be None!");
ASSERT_MSG(false, "DeviceIndex should never be None!");
return;
}

View File

@@ -1441,7 +1441,7 @@ void Hid::GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
break;
case Core::HID::DeviceIndex::None:
default:
UNREACHABLE_MSG("DeviceIndex should never be None!");
ASSERT_MSG(false, "DeviceIndex should never be None!");
vibration_device_info.position = Core::HID::VibrationDevicePosition::None;
break;
}

View File

@@ -347,7 +347,7 @@ public:
}
if (!succeeded) {
UNREACHABLE_MSG("Out of address space!");
ASSERT_MSG(false, "Out of address space!");
return Kernel::ResultOutOfMemory;
}

View File

@@ -290,7 +290,7 @@ MiiStoreData BuildRandomStoreData(Age age, Gender gender, Race race, const Commo
u8 glasses_type{};
while (glasses_type_start < glasses_type_info.values[glasses_type]) {
if (++glasses_type >= glasses_type_info.values_count) {
UNREACHABLE();
ASSERT(false);
break;
}
}

View File

@@ -23,7 +23,7 @@ u32 SyncpointManager::AllocateSyncpoint() {
return syncpoint_id;
}
}
UNREACHABLE_MSG("No more available syncpoints!");
ASSERT_MSG(false, "No more available syncpoints!");
return {};
}

View File

@@ -659,7 +659,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
value = core->consumer_usage_bit;
break;
default:
UNREACHABLE();
ASSERT(false);
return Status::BadValue;
}

View File

@@ -48,12 +48,12 @@ ResultCode StandardUserSystemClockCore::GetClockContext(Core::System& system,
}
ResultCode StandardUserSystemClockCore::Flush(const SystemClockContext&) {
UNREACHABLE();
UNIMPLEMENTED();
return ERROR_NOT_IMPLEMENTED;
}
ResultCode StandardUserSystemClockCore::SetClockContext(const SystemClockContext&) {
UNREACHABLE();
UNIMPLEMENTED();
return ERROR_NOT_IMPLEMENTED;
}
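
This hunk illustrates the other half of the assert-semantics change: since UNREACHABLE() now always crashes, paths that are merely stubbed out, like Flush and SetClockContext here, switch to UNIMPLEMENTED(), which logs and (outside debug-assert builds) lets execution continue to the error return. A toy rendering of the difference, with deliberately simplified macro bodies:

#include <cstdio>
#include <cstdlib>

#define UNIMPLEMENTED() std::fputs("Unimplemented code!\n", stderr) // log and keep running

// After this PR, UNREACHABLE() is unconditionally fatal:
#define UNREACHABLE()                               \
    do {                                            \
        std::fputs("Unreachable code!\n", stderr);  \
        std::abort();                               \
    } while (0)

int FlushClock() {
    UNIMPLEMENTED(); // stubbed functionality: report it, let the caller see an error code
    return -1;       // plays the role of ERROR_NOT_IMPLEMENTED above
}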

View File

@@ -111,7 +111,7 @@ struct TimeManager::Impl final {
FileSys::VirtualFile& vfs_file) {
if (time_zone_content_manager.GetTimeZoneManager().SetDeviceLocationNameWithTimeZoneRule(
location_name, vfs_file) != ResultSuccess) {
UNREACHABLE();
ASSERT(false);
return;
}
@@ -155,7 +155,7 @@ struct TimeManager::Impl final {
} else {
if (standard_local_system_clock_core.SetCurrentTime(system_, posix_time) !=
ResultSuccess) {
UNREACHABLE();
ASSERT(false);
return;
}
}
@@ -170,7 +170,7 @@ struct TimeManager::Impl final {
if (standard_network_system_clock_core.SetSystemClockContext(clock_context) !=
ResultSuccess) {
UNREACHABLE();
ASSERT(false);
return;
}
@@ -183,7 +183,7 @@ struct TimeManager::Impl final {
Clock::SteadyClockTimePoint steady_clock_time_point) {
if (standard_user_system_clock_core.SetAutomaticCorrectionEnabled(
system_, is_automatic_correction_enabled) != ResultSuccess) {
UNREACHABLE();
ASSERT(false);
return;
}
@@ -203,7 +203,7 @@ struct TimeManager::Impl final {
if (GetStandardLocalSystemClockCore()
.SetCurrentTime(system_, timespan.ToSeconds())
.IsError()) {
UNREACHABLE();
ASSERT(false);
return;
}
}

View File

@@ -279,7 +279,7 @@ static constexpr int TransitionTime(int year, Rule rule, int offset) {
break;
}
default:
UNREACHABLE();
ASSERT(false);
}
return value + rule.transition_time + offset;
}

View File

@@ -128,11 +128,10 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
// Apply patches if necessary
if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) {
std::vector<u8> pi_header;
pi_header.insert(pi_header.begin(), reinterpret_cast<u8*>(&nso_header),
reinterpret_cast<u8*>(&nso_header) + sizeof(NSOHeader));
pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.data(),
program_image.data() + program_image.size());
std::vector<u8> pi_header(sizeof(NSOHeader) + program_image.size());
std::memcpy(pi_header.data(), &nso_header, sizeof(NSOHeader));
std::memcpy(pi_header.data() + sizeof(NSOHeader), program_image.data(),
program_image.size());
pi_header = pm->PatchNSO(pi_header, nso_file.GetName());

View File

@@ -25,7 +25,6 @@ u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
return memory.Read64(addr);
default:
UNREACHABLE();
return 0;
}
}

View File

@@ -58,7 +58,7 @@ public:
[[nodiscard]] Stack Remove(Token token) const;
private:
boost::container::small_vector<StackEntry, 3> entries;
std::vector<StackEntry> entries;
};
struct IndirectBranch {

View File

@@ -975,13 +975,7 @@ private:
Environment& env;
IR::AbstractSyntaxList& syntax_list;
bool uses_demote_to_helper{};
// TODO: C++20 Remove this when all compilers support constexpr std::vector
#if __cpp_lib_constexpr_vector >= 201907
static constexpr Flow::Block dummy_flow_block;
#else
const Flow::Block dummy_flow_block;
#endif
};
} // Anonymous namespace
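
The removal above matches the commit message for 963ed37fd6: a constexpr variable requires a literal type, and a base class that is not literal (for instance, one with a non-constexpr destructor) makes the derived class non-literal too, so newer libstdc++/GCC combinations reject the declaration. Illustrative only, with invented types:

struct NonLiteralBase {
    ~NonLiteralBase() {} // user-provided, non-constexpr destructor: not a literal type
};

struct Block : NonLiteralBase {
    int id{};
};

// constexpr Block dummy_block{}; // error: 'Block' is not a literal type
const Block dummy_block{};        // the fix: a runtime constant instead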

View File

@@ -224,7 +224,7 @@ void Codec::Decode() {
vp9_hidden_frame = vp9_decoder->WasFrameHidden();
return vp9_decoder->GetFrameBytes();
default:
UNREACHABLE();
ASSERT(false);
return std::vector<u8>{};
}
}();

View File

@@ -228,7 +228,7 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
break;
}
default:
UNREACHABLE();
ASSERT(false);
break;
}
gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),

View File

@@ -202,7 +202,7 @@ public:
case Size::Size_11_11_10:
return 3;
default:
UNREACHABLE();
ASSERT(false);
return 1;
}
}
@@ -238,7 +238,7 @@ public:
case Size::Size_11_11_10:
return 4;
default:
UNREACHABLE();
ASSERT(false);
return 1;
}
}
@@ -274,7 +274,7 @@ public:
case Size::Size_11_11_10:
return "11_11_10";
default:
UNREACHABLE();
ASSERT(false);
return {};
}
}
@@ -296,7 +296,7 @@ public:
case Type::Float:
return "FLOAT";
}
UNREACHABLE();
ASSERT(false);
return {};
}
@@ -336,7 +336,7 @@ public:
case 3:
return {x3, y3};
default:
UNREACHABLE();
ASSERT(false);
return {0, 0};
}
}
@@ -1193,7 +1193,7 @@ public:
case IndexFormat::UnsignedInt:
return 4;
}
UNREACHABLE();
ASSERT(false);
return 1;
}

View File

@@ -62,7 +62,7 @@ void MaxwellDMA::Launch() {
if (!is_src_pitch && !is_dst_pitch) {
// If both the source and the destination are in block layout, assert.
UNREACHABLE_MSG("Tiled->Tiled DMA transfers are not yet implemented");
UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
return;
}
@@ -260,7 +260,7 @@ void MaxwellDMA::ReleaseSemaphore() {
memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
break;
default:
UNREACHABLE_MSG("Unknown semaphore type: {}", static_cast<u32>(type.Value()));
ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
}
}

View File

@@ -50,7 +50,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
} else if (const auto* invalidate = std::get_if<InvalidateRegionCommand>(&next.data)) {
rasterizer->OnCPUWrite(invalidate->addr, invalidate->size);
} else {
UNREACHABLE();
ASSERT(false);
}
state.signaled_fence.store(next.fence);
if (next.block) {

View File

@@ -71,7 +71,7 @@ void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
}
}
if (!mid_method.has_value()) {
UNREACHABLE_MSG("Macro 0x{0:x} was not uploaded", method);
ASSERT_MSG(false, "Macro 0x{0:x} was not uploaded", method);
return;
}
}

View File

@@ -308,7 +308,6 @@ bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond,
return value != 0;
}
UNREACHABLE();
return true;
}
Macro::Opcode MacroInterpreterImpl::GetOpcode() const {

View File

@@ -411,7 +411,7 @@ void MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
Xbyak::Label end;
auto value = Compile_GetRegister(opcode.src_a, eax);
test(value, value);
cmp(value, 0); // test(value, value);
if (optimizer.has_delayed_pc) {
switch (opcode.branch_condition) {
case Macro::BranchCondition::Zero:

View File

@@ -67,7 +67,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
ASSERT(it->first == gpu_addr);
map_ranges.erase(it);
} else {
UNREACHABLE_MSG("Unmapping non-existent GPU address=0x{:x}", gpu_addr);
ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
}
const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
@@ -206,7 +206,7 @@ T MemoryManager::Read(GPUVAddr addr) const {
return value;
}
UNREACHABLE();
ASSERT(false);
return {};
}
@@ -219,7 +219,7 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
return;
}
UNREACHABLE();
ASSERT(false);
}
template u8 MemoryManager::Read<u8>(GPUVAddr addr) const;

View File

@@ -48,7 +48,7 @@ GLenum Stage(size_t stage_index) {
case 4:
return GL_FRAGMENT_SHADER;
}
UNREACHABLE_MSG("{}", stage_index);
ASSERT_MSG(false, "{}", stage_index);
return GL_NONE;
}
@@ -65,7 +65,7 @@ GLenum AssemblyStage(size_t stage_index) {
case 4:
return GL_FRAGMENT_PROGRAM_NV;
}
UNREACHABLE_MSG("{}", stage_index);
ASSERT_MSG(false, "{}", stage_index);
return GL_NONE;
}

View File

@@ -85,7 +85,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
case Maxwell::TessellationPrimitive::Quads:
return Shader::TessPrimitive::Quads;
}
UNREACHABLE();
ASSERT(false);
return Shader::TessPrimitive::Triangles;
}();
info.tess_spacing = [&] {
@@ -97,7 +97,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
case Maxwell::TessellationSpacing::FractionalEven:
return Shader::TessSpacing::FractionalEven;
}
UNREACHABLE();
ASSERT(false);
return Shader::TessSpacing::Equal;
}();
break;

View File

@@ -83,7 +83,7 @@ GLenum ImageTarget(const VideoCommon::ImageInfo& info) {
case ImageType::Buffer:
return GL_TEXTURE_BUFFER;
}
UNREACHABLE_MSG("Invalid image type={}", info.type);
ASSERT_MSG(false, "Invalid image type={}", info.type);
return GL_NONE;
}
@@ -107,7 +107,7 @@ GLenum ImageTarget(Shader::TextureType type, int num_samples = 1) {
case Shader::TextureType::Buffer:
return GL_TEXTURE_BUFFER;
}
UNREACHABLE_MSG("Invalid image view type={}", type);
ASSERT_MSG(false, "Invalid image view type={}", type);
return GL_NONE;
}
@@ -119,7 +119,7 @@ GLenum TextureMode(PixelFormat format, bool is_first) {
case PixelFormat::S8_UINT_D24_UNORM:
return is_first ? GL_STENCIL_INDEX : GL_DEPTH_COMPONENT;
default:
UNREACHABLE();
ASSERT(false);
return GL_DEPTH_COMPONENT;
}
}
@@ -140,7 +140,7 @@ GLint Swizzle(SwizzleSource source) {
case SwizzleSource::OneFloat:
return GL_ONE;
}
UNREACHABLE_MSG("Invalid swizzle source={}", source);
ASSERT_MSG(false, "Invalid swizzle source={}", source);
return GL_NONE;
}
@@ -197,7 +197,7 @@ GLint ConvertA5B5G5R1_UNORM(SwizzleSource source) {
case SwizzleSource::OneFloat:
return GL_ONE;
}
UNREACHABLE_MSG("Invalid swizzle source={}", source);
ASSERT_MSG(false, "Invalid swizzle source={}", source);
return GL_NONE;
}
@@ -381,10 +381,10 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
glTextureStorage3D(handle, gl_num_levels, gl_internal_format, width, height, depth);
break;
case GL_TEXTURE_BUFFER:
UNREACHABLE();
ASSERT(false);
break;
default:
UNREACHABLE_MSG("Invalid target=0x{:x}", target);
ASSERT_MSG(false, "Invalid target=0x{:x}", target);
break;
}
return texture;
@@ -420,7 +420,7 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
case Shader::ImageFormat::R32G32B32A32_UINT:
return GL_RGBA32UI;
}
UNREACHABLE_MSG("Invalid image format={}", format);
ASSERT_MSG(false, "Invalid image format={}", format);
return GL_R32UI;
}
@@ -579,7 +579,7 @@ void TextureCacheRuntime::EmulateCopyImage(Image& dst, Image& src,
} else if (IsPixelFormatBGR(dst.info.format) || IsPixelFormatBGR(src.info.format)) {
format_conversion_pass.ConvertImage(dst, src, copies);
} else {
UNREACHABLE();
ASSERT(false);
}
}
@@ -620,7 +620,7 @@ void TextureCacheRuntime::AccelerateImageUpload(Image& image, const ImageBufferM
case ImageType::Linear:
return util_shaders.PitchUpload(image, map, swizzles);
default:
UNREACHABLE();
ASSERT(false);
break;
}
}
@@ -639,7 +639,7 @@ FormatProperties TextureCacheRuntime::FormatInfo(ImageType type, GLenum internal
case ImageType::e3D:
return format_properties[2].at(internal_format);
default:
UNREACHABLE();
ASSERT(false);
return FormatProperties{};
}
}
@@ -888,7 +888,7 @@ void Image::CopyBufferToImage(const VideoCommon::BufferImageCopy& copy, size_t b
}
break;
default:
UNREACHABLE();
ASSERT(false);
}
}
@@ -924,7 +924,7 @@ void Image::CopyImageToBuffer(const VideoCommon::BufferImageCopy& copy, size_t b
depth = copy.image_extent.depth;
break;
default:
UNREACHABLE();
ASSERT(false);
}
// Compressed formats don't have a pixel format or type
const bool is_compressed = gl_format == GL_NONE;
@@ -950,7 +950,7 @@ void Image::Scale(bool up_scale) {
case SurfaceType::DepthStencil:
return GL_DEPTH_STENCIL_ATTACHMENT;
default:
UNREACHABLE();
ASSERT(false);
return GL_COLOR_ATTACHMENT0;
}
}();
@@ -965,7 +965,7 @@ void Image::Scale(bool up_scale) {
case SurfaceType::DepthStencil:
return GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
default:
UNREACHABLE();
ASSERT(false);
return GL_COLOR_BUFFER_BIT;
}
}();
@@ -980,7 +980,7 @@ void Image::Scale(bool up_scale) {
case SurfaceType::DepthStencil:
return 3;
default:
UNREACHABLE();
ASSERT(false);
return 0;
}
}();
@@ -1045,7 +1045,7 @@ bool Image::ScaleUp(bool ignore) {
return false;
}
if (info.type == ImageType::Linear) {
UNREACHABLE();
ASSERT(false);
return false;
}
flags |= ImageFlagBits::Rescaled;
@@ -1139,7 +1139,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
UNIMPLEMENTED();
break;
case ImageViewType::Buffer:
UNREACHABLE();
ASSERT(false);
break;
}
switch (info.type) {
@@ -1319,7 +1319,7 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
buffer_bits |= GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
break;
default:
UNREACHABLE();
ASSERT(false);
buffer_bits |= GL_DEPTH_BUFFER_BIT;
break;
}

View File

@@ -206,7 +206,7 @@ inline GLenum IndexFormat(Maxwell::IndexFormat index_format) {
case Maxwell::IndexFormat::UnsignedInt:
return GL_UNSIGNED_INT;
}
UNREACHABLE_MSG("Invalid index_format={}", index_format);
ASSERT_MSG(false, "Invalid index_format={}", index_format);
return {};
}
@@ -243,7 +243,7 @@ inline GLenum PrimitiveTopology(Maxwell::PrimitiveTopology topology) {
case Maxwell::PrimitiveTopology::Patches:
return GL_PATCHES;
}
UNREACHABLE_MSG("Invalid topology={}", topology);
ASSERT_MSG(false, "Invalid topology={}", topology);
return GL_POINTS;
}
@@ -271,8 +271,8 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode,
}
break;
}
UNREACHABLE_MSG("Invalid texture filter mode={} and mipmap filter mode={}", filter_mode,
mipmap_filter_mode);
ASSERT_MSG(false, "Invalid texture filter mode={} and mipmap filter mode={}", filter_mode,
mipmap_filter_mode);
return GL_NEAREST;
}
@@ -550,7 +550,7 @@ inline GLenum PolygonMode(Maxwell::PolygonMode polygon_mode) {
case Maxwell::PolygonMode::Fill:
return GL_FILL;
}
UNREACHABLE_MSG("Invalid polygon mode={}", polygon_mode);
ASSERT_MSG(false, "Invalid polygon mode={}", polygon_mode);
return GL_FILL;
}
@@ -563,7 +563,7 @@ inline GLenum ReductionFilter(Tegra::Texture::SamplerReduction filter) {
case Tegra::Texture::SamplerReduction::Max:
return GL_MAX;
}
UNREACHABLE_MSG("Invalid reduction filter={}", static_cast<int>(filter));
ASSERT_MSG(false, "Invalid reduction filter={}", static_cast<int>(filter));
return GL_WEIGHTED_AVERAGE_ARB;
}

View File

@@ -79,7 +79,7 @@ const char* GetSource(GLenum source) {
case GL_DEBUG_SOURCE_OTHER:
return "OTHER";
default:
UNREACHABLE();
ASSERT(false);
return "Unknown source";
}
}
@@ -101,7 +101,7 @@ const char* GetType(GLenum type) {
case GL_DEBUG_TYPE_MARKER:
return "MARKER";
default:
UNREACHABLE();
ASSERT(false);
return "Unknown type";
}
}

View File

@@ -282,7 +282,7 @@ GLenum StoreFormat(u32 bytes_per_block) {
case 16:
return GL_RGBA32UI;
}
UNREACHABLE();
ASSERT(false);
return GL_R8UI;
}

View File

@@ -25,7 +25,7 @@ VkFilter Filter(Tegra::Texture::TextureFilter filter) {
case Tegra::Texture::TextureFilter::Linear:
return VK_FILTER_LINEAR;
}
UNREACHABLE_MSG("Invalid sampler filter={}", filter);
ASSERT_MSG(false, "Invalid sampler filter={}", filter);
return {};
}
@@ -42,7 +42,7 @@ VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter
case Tegra::Texture::TextureMipmapFilter::Linear:
return VK_SAMPLER_MIPMAP_MODE_LINEAR;
}
UNREACHABLE_MSG("Invalid sampler mipmap mode={}", mipmap_filter);
ASSERT_MSG(false, "Invalid sampler mipmap mode={}", mipmap_filter);
return {};
}
@@ -70,7 +70,7 @@ VkSamplerAddressMode WrapMode(const Device& device, Tegra::Texture::WrapMode wra
case Tegra::Texture::TextureFilter::Linear:
return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
}
UNREACHABLE();
ASSERT(false);
return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
case Tegra::Texture::WrapMode::MirrorOnceClampToEdge:
return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
@@ -744,7 +744,7 @@ VkViewportCoordinateSwizzleNV ViewportSwizzle(Maxwell::ViewportSwizzle swizzle)
case Maxwell::ViewportSwizzle::NegativeW:
return VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV;
}
UNREACHABLE_MSG("Invalid swizzle={}", swizzle);
ASSERT_MSG(false, "Invalid swizzle={}", swizzle);
return {};
}
@@ -757,7 +757,7 @@ VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reducti
case Tegra::Texture::SamplerReduction::Max:
return VK_SAMPLER_REDUCTION_MODE_MAX_EXT;
}
UNREACHABLE_MSG("Invalid sampler mode={}", static_cast<int>(reduction));
ASSERT_MSG(false, "Invalid sampler mode={}", static_cast<int>(reduction));
return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT;
}
@@ -780,7 +780,7 @@ VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode) {
case Tegra::Texture::MsaaMode::Msaa4x4:
return VK_SAMPLE_COUNT_16_BIT;
default:
UNREACHABLE_MSG("Invalid msaa_mode={}", static_cast<int>(msaa_mode));
ASSERT_MSG(false, "Invalid msaa_mode={}", static_cast<int>(msaa_mode));
return VK_SAMPLE_COUNT_1_BIT;
}
}

View File

@@ -46,7 +46,7 @@ size_t BytesPerIndex(VkIndexType index_type) {
case VK_INDEX_TYPE_UINT32:
return 4;
default:
UNREACHABLE_MSG("Invalid index type={}", index_type);
ASSERT_MSG(false, "Invalid index type={}", index_type);
return 1;
}
}
@@ -366,7 +366,7 @@ void BufferCacheRuntime::ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle
std::memcpy(staging_data, MakeQuadIndices<u32>(quad, first).data(), quad_size);
break;
default:
UNREACHABLE();
ASSERT(false);
break;
}
staging_data += quad_size;

View File

@@ -265,7 +265,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedInt:
return 2;
}
UNREACHABLE();
ASSERT(false);
return 2;
}();
const u32 input_size = num_vertices << index_shift;
@@ -333,7 +333,7 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
const VkImageMemoryBarrier image_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = is_initialized ? VK_ACCESS_SHADER_WRITE_BIT : VkAccessFlags{},
.srcAccessMask = is_initialized ? VK_ACCESS_SHADER_WRITE_BIT : VK_ACCESS_NONE,
.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
.oldLayout = is_initialized ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,

View File
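
For context on the VK_ACCESS_NONE hunk above, this is roughly what the barrier construction looks like written out on its own. It assumes a Vulkan 1.3 header, where VK_ACCESS_NONE (numerically 0) is defined; the function name and the subresource range values below are illustrative, not yuzu's.

#include <vulkan/vulkan.h>

// Sketch only: a shader-write -> shader-read/write barrier whose source access
// mask is empty the first time the image is touched.
VkImageMemoryBarrier MakeComputeUploadBarrier(VkImage image, bool is_initialized) {
    return VkImageMemoryBarrier{
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .pNext = nullptr,
        .srcAccessMask = is_initialized ? VK_ACCESS_SHADER_WRITE_BIT : VK_ACCESS_NONE,
        .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
        .oldLayout = is_initialized ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_UNDEFINED,
        .newLayout = VK_IMAGE_LAYOUT_GENERAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = image,
        .subresourceRange{
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
    };
}

On pre-1.3 headers the VkAccessFlags{} it replaces produces the same zero value; the named constant simply states the intent explicitly.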

@@ -174,7 +174,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
case Maxwell::TessellationPrimitive::Quads:
return Shader::TessPrimitive::Quads;
}
UNREACHABLE();
ASSERT(false);
return Shader::TessPrimitive::Triangles;
}();
info.tess_spacing = [&] {
@@ -187,7 +187,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
case Maxwell::TessellationSpacing::FractionalEven:
return Shader::TessSpacing::FractionalEven;
}
UNREACHABLE();
ASSERT(false);
return Shader::TessSpacing::Equal;
}();
break;

View File

@@ -263,7 +263,7 @@ StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(MemoryUsage
case MemoryUsage::Download:
return download_cache;
default:
UNREACHABLE_MSG("Invalid memory usage={}", usage);
ASSERT_MSG(false, "Invalid memory usage={}", usage);
return upload_cache;
}
}

View File

@@ -70,7 +70,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case ImageType::Buffer:
break;
}
UNREACHABLE_MSG("Invalid image type={}", type);
ASSERT_MSG(false, "Invalid image type={}", type);
return {};
}
@@ -87,7 +87,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case 16:
return VK_SAMPLE_COUNT_16_BIT;
default:
UNREACHABLE_MSG("Invalid number of samples={}", num_samples);
ASSERT_MSG(false, "Invalid number of samples={}", num_samples);
return VK_SAMPLE_COUNT_1_BIT;
}
}
@@ -107,7 +107,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
break;
default:
UNREACHABLE_MSG("Invalid surface type");
ASSERT_MSG(false, "Invalid surface type");
}
}
if (info.storage) {
@@ -179,7 +179,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case VideoCore::Surface::SurfaceType::DepthStencil:
return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
default:
UNREACHABLE_MSG("Invalid surface type");
ASSERT_MSG(false, "Invalid surface type");
return VkImageAspectFlags{};
}
}
@@ -221,7 +221,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case SwizzleSource::OneInt:
return VK_COMPONENT_SWIZZLE_ONE;
}
UNREACHABLE_MSG("Invalid swizzle={}", swizzle);
ASSERT_MSG(false, "Invalid swizzle={}", swizzle);
return VK_COMPONENT_SWIZZLE_ZERO;
}
@@ -242,10 +242,10 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case Shader::TextureType::ColorArrayCube:
return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case Shader::TextureType::Buffer:
UNREACHABLE_MSG("Texture buffers can't be image views");
ASSERT_MSG(false, "Texture buffers can't be image views");
return VK_IMAGE_VIEW_TYPE_1D;
}
UNREACHABLE_MSG("Invalid image view type={}", type);
ASSERT_MSG(false, "Invalid image view type={}", type);
return VK_IMAGE_VIEW_TYPE_2D;
}
@@ -269,10 +269,10 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
UNIMPLEMENTED_MSG("Rect image view");
return VK_IMAGE_VIEW_TYPE_2D;
case VideoCommon::ImageViewType::Buffer:
UNREACHABLE_MSG("Texture buffers can't be image views");
ASSERT_MSG(false, "Texture buffers can't be image views");
return VK_IMAGE_VIEW_TYPE_1D;
}
UNREACHABLE_MSG("Invalid image view type={}", type);
ASSERT_MSG(false, "Invalid image view type={}", type);
return VK_IMAGE_VIEW_TYPE_2D;
}
@@ -644,7 +644,7 @@ struct RangedBarrierRange {
case Shader::ImageFormat::R32G32B32A32_UINT:
return VK_FORMAT_R32G32B32A32_UINT;
}
UNREACHABLE_MSG("Invalid image format={}", format);
ASSERT_MSG(false, "Invalid image format={}", format);
return VK_FORMAT_R32_UINT;
}
@@ -1596,7 +1596,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
UNIMPLEMENTED();
break;
case VideoCommon::ImageViewType::Buffer:
UNREACHABLE();
ASSERT(false);
break;
}
}
@@ -1822,7 +1822,7 @@ void TextureCacheRuntime::AccelerateImageUpload(
if (IsPixelFormatASTC(image.info.format)) {
return astc_decoder_pass.Assemble(image, map, swizzles);
}
UNREACHABLE();
ASSERT(false);
}
} // namespace Vulkan

View File

@@ -280,7 +280,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
stage_index = 4;
break;
default:
UNREACHABLE_MSG("Invalid program={}", program);
ASSERT_MSG(false, "Invalid program={}", program);
break;
}
const u64 local_size{sph.LocalMemorySize()};

View File

@@ -29,7 +29,7 @@ SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_t
return SurfaceTarget::Texture2DArray;
default:
LOG_CRITICAL(HW_GPU, "Unimplemented texture_type={}", texture_type);
UNREACHABLE();
ASSERT(false);
return SurfaceTarget::Texture2D;
}
}
@@ -48,7 +48,7 @@ bool SurfaceTargetIsLayered(SurfaceTarget target) {
return true;
default:
LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", target);
UNREACHABLE();
ASSERT(false);
return false;
}
}
@@ -67,7 +67,7 @@ bool SurfaceTargetIsArray(SurfaceTarget target) {
return true;
default:
LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", target);
UNREACHABLE();
ASSERT(false);
return false;
}
}

View File

@@ -94,7 +94,7 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
resources.layers = 1;
break;
default:
UNREACHABLE_MSG("Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
ASSERT_MSG(false, "Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
break;
}
if (type != ImageType::Linear) {

View File

@@ -71,7 +71,7 @@ ImageViewInfo::ImageViewInfo(const TICEntry& config, s32 base_layer) noexcept
range.extent.layers = config.Depth() * 6;
break;
default:
UNREACHABLE_MSG("Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
ASSERT_MSG(false, "Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
break;
}
}

View File

@@ -23,7 +23,7 @@ namespace VideoCommon {
case 16:
return {2, 2};
}
UNREACHABLE_MSG("Invalid number of samples={}", num_samples);
ASSERT_MSG(false, "Invalid number of samples={}", num_samples);
return {1, 1};
}
@@ -47,7 +47,7 @@ namespace VideoCommon {
case MsaaMode::Msaa4x4:
return 16;
}
UNREACHABLE_MSG("Invalid MSAA mode={}", static_cast<int>(msaa_mode));
ASSERT_MSG(false, "Invalid MSAA mode={}", static_cast<int>(msaa_mode));
return 1;
}

View File

@@ -1485,14 +1485,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
const auto page_it = selected_page_table.find(page);
if (page_it == selected_page_table.end()) {
UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
return;
}
std::vector<ImageId>& image_ids = page_it->second;
const auto vector_it = std::ranges::find(image_ids, image_id);
if (vector_it == image_ids.end()) {
UNREACHABLE_MSG("Unregistering unregistered image in page=0x{:x}",
page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
page << PAGE_BITS);
return;
}
image_ids.erase(vector_it);
@@ -1504,14 +1504,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
const auto page_it = page_table.find(page);
if (page_it == page_table.end()) {
UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
return;
}
std::vector<ImageMapId>& image_map_ids = page_it->second;
const auto vector_it = std::ranges::find(image_map_ids, map_id);
if (vector_it == image_map_ids.end()) {
UNREACHABLE_MSG("Unregistering unregistered image in page=0x{:x}",
page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
page << PAGE_BITS);
return;
}
image_map_ids.erase(vector_it);
@@ -1532,7 +1532,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) {
const auto page_it = page_table.find(page);
if (page_it == page_table.end()) {
UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
return;
}
std::vector<ImageMapId>& image_map_ids = page_it->second;
@@ -1616,15 +1616,15 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
const GPUVAddr gpu_addr = image.gpu_addr;
const auto alloc_it = image_allocs_table.find(gpu_addr);
if (alloc_it == image_allocs_table.end()) {
UNREACHABLE_MSG("Trying to delete an image alloc that does not exist in address 0x{:x}",
gpu_addr);
ASSERT_MSG(false, "Trying to delete an image alloc that does not exist in address 0x{:x}",
gpu_addr);
return;
}
const ImageAllocId alloc_id = alloc_it->second;
std::vector<ImageId>& alloc_images = slot_image_allocs[alloc_id].images;
const auto alloc_image_it = std::ranges::find(alloc_images, image_id);
if (alloc_image_it == alloc_images.end()) {
UNREACHABLE_MSG("Trying to delete an image that does not exist");
ASSERT_MSG(false, "Trying to delete an image that does not exist");
return;
}
ASSERT_MSG(False(image.flags & ImageFlagBits::Tracked), "Image was not untracked");

View File
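
The texture cache hunks above all follow the same two-step lookup before erasing an entry: find the page in the page table, then find the id inside that page's vector, asserting (non-fatally in the diff) if either lookup misses. Stripped of the cache's own types, the shape is roughly the following; u64/ImageId are simplified stand-ins and a plain <cassert> assert replaces the project's macro.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <vector>

using ImageId = std::uint32_t;
using PageTable = std::unordered_map<std::uint64_t, std::vector<ImageId>>;

void UnregisterPageEntry(PageTable& page_table, std::uint64_t page, ImageId image_id) {
    const auto page_it = page_table.find(page);
    if (page_it == page_table.end()) {
        assert(false && "Unregistering unregistered page");
        return;
    }
    std::vector<ImageId>& image_ids = page_it->second;
    const auto vector_it = std::ranges::find(image_ids, image_id);
    if (vector_it == image_ids.end()) {
        assert(false && "Unregistering unregistered image in page");
        return;
    }
    image_ids.erase(vector_it);
}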

@@ -87,7 +87,7 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
BPP_CASE(16)
#undef BPP_CASE
default:
UNREACHABLE_MSG("Invalid bytes_per_pixel={}", bytes_per_pixel);
ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
}
}
@@ -209,7 +209,7 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
BPP_CASE(16)
#undef BPP_CASE
default:
UNREACHABLE_MSG("Invalid bytes_per_pixel={}", bytes_per_pixel);
ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
}
}
@@ -230,7 +230,7 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width,
BPP_CASE(16)
#undef BPP_CASE
default:
UNREACHABLE_MSG("Invalid bytes_per_pixel={}", bytes_per_pixel);
ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
}
}
@@ -253,7 +253,7 @@ void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 widt
BPP_CASE(16)
#undef BPP_CASE
default:
UNREACHABLE_MSG("Invalid bytes_per_pixel={}", bytes_per_pixel);
ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
}
}

View File

@@ -738,9 +738,10 @@ VkFormat Device::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags
// The wanted format is not supported by hardware, search for alternatives
const VkFormat* alternatives = GetFormatAlternatives(wanted_format);
if (alternatives == nullptr) {
UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host "
"hardware does not support it",
wanted_format, wanted_usage, format_type);
ASSERT_MSG(false,
"Format={} with usage={} and type={} has no defined alternatives and host "
"hardware does not support it",
wanted_format, wanted_usage, format_type);
return wanted_format;
}
@@ -756,9 +757,10 @@ VkFormat Device::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags
}
// No alternatives found, panic
UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and "
"doesn't support any of the alternatives",
wanted_format, wanted_usage, format_type);
ASSERT_MSG(false,
"Format={} with usage={} and type={} is not supported by the host hardware and "
"doesn't support any of the alternatives",
wanted_format, wanted_usage, format_type);
return wanted_format;
}

View File
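
The two message changes above sit at the ends of the same fallback search that the surrounding comments describe: the requested format is unsupported, so walk its table of alternatives and take the first one the host supports, otherwise warn and return the request unchanged. A toy version of that control flow, with plain integers standing in for VkFormat and a made-up support query:

#include <cstddef>
#include <cstdio>

using Format = int;
constexpr Format kFormatEnd = 0; // stand-in terminator for an alternatives table

// Stand-in for the real driver capability query.
bool IsFormatSupported(Format format) {
    return format == 42;
}

Format GetSupportedFormat(Format wanted, const Format* alternatives) {
    if (IsFormatSupported(wanted)) {
        return wanted;
    }
    if (alternatives == nullptr) {
        std::fprintf(stderr, "Format %d has no defined alternatives\n", wanted);
        return wanted;
    }
    for (std::size_t i = 0; alternatives[i] != kFormatEnd; ++i) {
        if (IsFormatSupported(alternatives[i])) {
            return alternatives[i];
        }
    }
    std::fprintf(stderr, "Format %d and all of its alternatives are unsupported\n", wanted);
    return wanted;
}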

@@ -49,7 +49,7 @@ struct Range {
return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
}
UNREACHABLE_MSG("Invalid memory usage={}", usage);
ASSERT_MSG(false, "Invalid memory usage={}", usage);
return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
}
@@ -325,7 +325,7 @@ VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask,
// Remove device local, if it's not supported by the requested resource
return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
}
UNREACHABLE_MSG("No compatible memory types found");
ASSERT_MSG(false, "No compatible memory types found");
return 0;
}
@@ -349,7 +349,7 @@ bool IsHostVisible(MemoryUsage usage) noexcept {
case MemoryUsage::Download:
return true;
}
UNREACHABLE_MSG("Invalid memory usage={}", usage);
ASSERT_MSG(false, "Invalid memory usage={}", usage);
return false;
}

View File
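
The second hunk in this file belongs to a retry path that its comment spells out: when no memory type satisfies the requested property flags, drop DEVICE_LOCAL and try again before declaring failure. A toy version of that relaxation with two fake memory types; everything here is a stand-in for the Vulkan structures, and only the retry logic mirrors the diff.

#include <cstdint>
#include <cstdio>
#include <iterator>

using MemoryProps = std::uint32_t;
constexpr MemoryProps kDeviceLocal = 1u << 0;
constexpr MemoryProps kHostVisible = 1u << 1;

// Stand-in memory type table: index -> properties that type supports.
constexpr MemoryProps kMemoryTypes[] = {kDeviceLocal, kHostVisible};

std::int32_t FindMemoryType(std::uint32_t type_mask, MemoryProps wanted) {
    for (std::uint32_t i = 0; i < std::size(kMemoryTypes); ++i) {
        const bool allowed = (type_mask & (1u << i)) != 0;
        if (allowed && (kMemoryTypes[i] & wanted) == wanted) {
            return static_cast<std::int32_t>(i);
        }
    }
    if ((wanted & kDeviceLocal) != 0) {
        // Relax: the resource does not strictly need device-local memory.
        return FindMemoryType(type_mask, wanted & ~kDeviceLocal);
    }
    std::fprintf(stderr, "No compatible memory types found\n");
    return -1;
}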

@@ -631,7 +631,7 @@ void QtControllerSelectorDialog::DisableUnsupportedPlayers() {
switch (max_supported_players) {
case 0:
default:
UNREACHABLE();
ASSERT(false);
return;
case 1:
ui->widgetSpacer->hide();

View File

@@ -52,8 +52,8 @@ QtNXWebEngineView::QtNXWebEngineView(QWidget* parent, Core::System& system,
: QWebEngineView(parent), input_subsystem{input_subsystem_},
url_interceptor(std::make_unique<UrlRequestInterceptor>()),
input_interpreter(std::make_unique<InputInterpreter>(system)),
default_profile{QWebEngineProfile::defaultProfile()}, global_settings{
default_profile->settings()} {
default_profile{QWebEngineProfile::defaultProfile()},
global_settings{QWebEngineSettings::globalSettings()} {
default_profile->setPersistentStoragePath(QString::fromStdString(Common::FS::PathToUTF8String(
Common::FS::GetYuzuPath(Common::FS::YuzuPath::YuzuDir) / "qtwebengine")));
@@ -78,7 +78,7 @@ QtNXWebEngineView::QtNXWebEngineView(QWidget* parent, Core::System& system,
default_profile->scripts()->insert(gamepad);
default_profile->scripts()->insert(window_nx);
default_profile->setUrlRequestInterceptor(url_interceptor.get());
default_profile->setRequestInterceptor(url_interceptor.get());
global_settings->setAttribute(QWebEngineSettings::LocalContentCanAccessRemoteUrls, true);
global_settings->setAttribute(QWebEngineSettings::FullScreenSupportEnabled, true);

View File

@@ -1591,6 +1591,7 @@ void GMainWindow::ShutdownGame() {
AllowOSSleep();
system->DetachDebugger();
discord_rpc->Pause();
emu_thread->RequestStop();

View File

@@ -344,6 +344,8 @@ void Config::ReadValues() {
ReadSetting("Debugging", Settings::values.use_debug_asserts);
ReadSetting("Debugging", Settings::values.use_auto_stub);
ReadSetting("Debugging", Settings::values.disable_macro_jit);
ReadSetting("Debugging", Settings::values.use_gdbstub);
ReadSetting("Debugging", Settings::values.gdbstub_port);
const auto title_list = sdl2_config->Get("AddOns", "title_ids", "");
std::stringstream ss(title_list);

View File

@@ -437,6 +437,11 @@ disable_macro_jit=false
# Presents guest frames as they become available. Experimental.
# false: Disabled (default), true: Enabled
disable_fps_limit=false
# Determines whether to enable the GDB stub and wait for the debugger to attach before running.
# false: Disabled (default), true: Enabled
use_gdbstub=false
# The port to use for the GDB server, if it is enabled.
gdbstub_port=6543
[WebService]
# Whether or not to enable telemetry

View File
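
To illustrate what the two new [Debugging] keys above mean in practice: with use_gdbstub enabled, a GDB server listens on gdbstub_port and emulation waits until a debugger attaches (from gdb, target remote localhost:6543 with the default port). The sketch below is a bare-bones POSIX listener showing only that blocking-accept behaviour; it is not yuzu's debugger code, and the function name is made up.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>

int WaitForDebugger(std::uint16_t port) {
    const int listener = socket(AF_INET, SOCK_STREAM, 0);
    const int enable = 1;
    setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable));

    sockaddr_in address{};
    address.sin_family = AF_INET;
    address.sin_addr.s_addr = htonl(INADDR_ANY); // listen on all interfaces
    address.sin_port = htons(port);
    bind(listener, reinterpret_cast<const sockaddr*>(&address), sizeof(address));
    listen(listener, 1);

    std::printf("Waiting for GDB on port %u...\n", static_cast<unsigned>(port));
    const int client = accept(listener, nullptr, nullptr); // blocks until gdb attaches
    close(listener);
    return client; // the GDB remote serial protocol is then spoken over this socket
}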

@@ -162,7 +162,15 @@ void EmuWindow_SDL2::WaitEvent() {
SDL_Event event;
if (!SDL_WaitEvent(&event)) {
LOG_CRITICAL(Frontend, "SDL_WaitEvent failed: {}", SDL_GetError());
const char* error = SDL_GetError();
if (!error || strcmp(error, "") == 0) {
// https://github.com/libsdl-org/SDL/issues/5780
// Sometimes SDL will return without actually having hit an error condition;
// just ignore it in this case.
return;
}
LOG_CRITICAL(Frontend, "SDL_WaitEvent failed: {}", error);
exit(1);
}

View File
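
The workaround above keys off an empty error string after SDL_WaitEvent returns failure. A standalone reproduction of the same guard (plain SDL2, not yuzu code), for trying the behaviour outside the emulator:

#include <SDL.h>
#include <cstdio>
#include <cstring>

int main(int, char**) {
    SDL_Init(SDL_INIT_VIDEO);
    SDL_Window* const window = SDL_CreateWindow("demo", 0, 0, 320, 240, SDL_WINDOW_SHOWN);
    bool running = true;
    while (running) {
        SDL_Event event;
        if (!SDL_WaitEvent(&event)) {
            const char* const error = SDL_GetError();
            if (error == nullptr || std::strcmp(error, "") == 0) {
                // Spurious wakeup without a real error condition; keep waiting.
                continue;
            }
            std::fprintf(stderr, "SDL_WaitEvent failed: %s\n", error);
            break;
        }
        if (event.type == SDL_QUIT) {
            running = false;
        }
    }
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}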

@@ -217,10 +217,19 @@ int main(int argc, char** argv) {
[](VideoCore::LoadCallbackStage, size_t value, size_t total) {});
}
system.RegisterExitCallback([&] {
// Just exit right away.
exit(0);
});
void(system.Run());
if (system.DebuggerEnabled()) {
system.InitializeDebugger();
}
while (emu_window->IsOpen()) {
emu_window->WaitEvent();
}
system.DetachDebugger();
void(system.Pause());
system.Shutdown();
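
Condensed, the flow in the hunk above is: register an exit callback that ends the process immediately, start the system, bring up the debugger only when it is enabled, pump window events until the window closes, then detach the debugger, pause and shut down. A stub-based sketch of just that ordering; the two types below are placeholders, not yuzu classes.

#include <cstdlib>
#include <functional>
#include <utility>

struct StubSystem {
    void RegisterExitCallback(std::function<void()> callback) { exit_callback = std::move(callback); }
    bool DebuggerEnabled() const { return debugger_enabled; }
    void Run() {}
    void InitializeDebugger() {}
    void DetachDebugger() {}
    void Pause() {}
    void Shutdown() {}

    bool debugger_enabled = false;
    std::function<void()> exit_callback;
};

struct StubWindow {
    bool IsOpen() const { return open; }
    void WaitEvent() { open = false; } // a real window would block on the event queue here
    bool open = true;
};

void RunHeadless(StubSystem& system, StubWindow& window) {
    system.RegisterExitCallback([] { std::exit(0); }); // guest-requested exit ends the process
    system.Run();
    if (system.DebuggerEnabled()) {
        system.InitializeDebugger();
    }
    while (window.IsOpen()) {
        window.WaitEvent();
    }
    system.DetachDebugger();
    system.Pause();
    system.Shutdown();
}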