Compare commits

12 Commits
mainline-0 ... mainline-0

| Author | SHA1 | Date |
|---|---|---|
| | eb4ad700b9 | |
| | 81b5be9a02 | |
| | d7e7fabfed | |
| | 5deb386345 | |
| | d9adcc3133 | |
| | 4e232765ef | |
| | a51416cadc | |
| | cc2f2d6fd8 | |
| | 52ceb0eaee | |
| | 8a5dff44e7 | |
| | bb8d924ee6 | |
| | ffcda8a48e | |
3
externals/CMakeLists.txt
vendored
@@ -42,9 +42,6 @@ target_include_directories(mbedtls PUBLIC ./mbedtls/include)
|
||||
add_library(microprofile INTERFACE)
|
||||
target_include_directories(microprofile INTERFACE ./microprofile)
|
||||
|
||||
# Open Source Archives
|
||||
add_subdirectory(open_source_archives EXCLUDE_FROM_ALL)
|
||||
|
||||
# Unicorn
|
||||
add_library(unicorn-headers INTERFACE)
|
||||
target_include_directories(unicorn-headers INTERFACE ./unicorn/include)
|
||||
|
||||
16
externals/open_source_archives/CMakeLists.txt
vendored
@@ -1,16 +0,0 @@
|
||||
add_library(open_source_archives
|
||||
src/FontChineseSimplified.cpp
|
||||
src/FontChineseTraditional.cpp
|
||||
src/FontExtendedChineseSimplified.cpp
|
||||
src/FontKorean.cpp
|
||||
src/FontNintendoExtended.cpp
|
||||
src/FontStandard.cpp
|
||||
include/FontChineseSimplified.h
|
||||
include/FontChineseTraditional.h
|
||||
include/FontExtendedChineseSimplified.h
|
||||
include/FontKorean.h
|
||||
include/FontNintendoExtended.h
|
||||
include/FontStandard.h
|
||||
)
|
||||
|
||||
target_include_directories(open_source_archives PUBLIC include)
|
||||
4
externals/open_source_archives/Readme.md
vendored
@@ -1,4 +0,0 @@
|
||||
These files were generated by https://github.com/FearlessTobi/yuzu_system_archives at git commit 0a24b0c9f38d71fb2c4bba5645a39029e539a5ec. To generate the files use the run.sh inside that repository.
|
||||
|
||||
The follwing system archives are currently included:
|
||||
- JPN/EUR/USA System Font
|
||||
@@ -1,6 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
extern const std::array<unsigned char, 217276> FontChineseSimplified;
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
extern const std::array<unsigned char, 222236> FontChineseTraditional;
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
extern const std::array<unsigned char, 293516> FontExtendedChineseSimplified;
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
extern const std::array<unsigned char, 217276> FontKorean;
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
extern const std::array<unsigned char, 172064> FontNintendoExtended;
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
extern const std::array<unsigned char, 217276> FontStandard;
|
||||
|
||||
18112
externals/open_source_archives/src/FontChineseSimplified.cpp
vendored
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
18112
externals/open_source_archives/src/FontKorean.cpp
vendored
File diff suppressed because it is too large
14344
externals/open_source_archives/src/FontNintendoExtended.cpp
vendored
File diff suppressed because it is too large
18112
externals/open_source_archives/src/FontStandard.cpp
vendored
File diff suppressed because it is too large
@@ -28,18 +28,14 @@ __declspec(noinline, noreturn)
}

#define ASSERT(_a_) \
do \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
while (0)
if (!(_a_)) { \
LOG_CRITICAL(Debug, "Assertion Failed!"); \
}

#define ASSERT_MSG(_a_, ...) \
do \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
} \
while (0)
if (!(_a_)) { \
LOG_CRITICAL(Debug, "Assertion Failed! " __VA_ARGS__); \
}

#define UNREACHABLE() ASSERT_MSG(false, "Unreachable code!")
#define UNREACHABLE_MSG(...) ASSERT_MSG(false, __VA_ARGS__)

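The ASSERT/ASSERT_MSG rewrite above trades the `do { ... } while (0)` wrapper and the `assert_noinline_call` indirection for a bare `if` block. A minimal sketch, assuming only a stand-in logging macro (not yuzu's `LOG_CRITICAL`), of what the dropped wrapper buys: the `do/while(0)` form always expands to exactly one statement, so `if (cond) ASSERT(x); else ...` keeps compiling, while the bare-`if` form leaves that `else` unmatched.

```cpp
#include <cstdio>

// Stand-in for LOG_CRITICAL(Debug, ...) -- illustration only.
#define LOG_FAILURE(msg) std::puts(msg)

// Old shape: expands to exactly one statement, regardless of context.
#define ASSERT_OLD(_a_)                       \
    do {                                      \
        if (!(_a_)) {                         \
            LOG_FAILURE("Assertion Failed!"); \
        }                                     \
    } while (0)

// New shape: a bare if block, as in the hunk above.
#define ASSERT_NEW(_a_)                       \
    if (!(_a_)) {                             \
        LOG_FAILURE("Assertion Failed!");     \
    }

int main() {
    const bool table_ok = true;

    if (table_ok)
        ASSERT_OLD(2 + 2 == 4);       // fine: the do/while form is one statement
    else
        LOG_FAILURE("table missing");

    // The same pattern with the new form does not compile: the ';' after the
    // macro closes the outer 'if', leaving the 'else' unmatched.
    // if (table_ok)
    //     ASSERT_NEW(2 + 2 == 4);
    // else
    //     LOG_FAILURE("table missing");

    ASSERT_NEW(2 + 2 == 4);  // plain statement-position use still works
    return 0;
}
```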
@@ -304,6 +304,13 @@ public:
return levels[priority == Depth ? 63 : priority].back();
}

void clear() {
used_priorities = 0;
for (std::size_t i = 0; i < Depth; i++) {
levels[i].clear();
}
}

private:
using const_list_iterator = typename std::list<T>::const_iterator;

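The new clear() resets the used-priorities bookkeeping together with every level's list. A hedged sketch of a container with the same general shape (only the members visible in the hunk are mirrored; everything else is invented for illustration):

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <list>

// Sketch of a fixed-depth priority queue: one std::list per priority level
// plus a bitmask tracking which levels are non-empty.
template <typename T, std::size_t Depth>
class MultiLevelQueueSketch {
public:
    void add(const T& value, std::size_t priority) {
        levels[priority].push_back(value);
        used_priorities |= (std::uint64_t{1} << priority);
    }

    // Mirrors the added clear(): the bitmask and the per-level lists must be
    // reset together, otherwise "which levels are occupied" goes stale.
    void clear() {
        used_priorities = 0;
        for (std::size_t i = 0; i < Depth; i++) {
            levels[i].clear();
        }
    }

    bool empty() const {
        return used_priorities == 0;
    }

private:
    std::uint64_t used_priorities = 0;
    std::array<std::list<T>, Depth> levels{};
};

// Usage sketch: a queue of thread IDs with 64 priority levels.
// MultiLevelQueueSketch<int, 64> queue;
// queue.add(42, 3);
// queue.clear();   // empty() is true again
```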
@@ -74,10 +74,24 @@ add_library(core STATIC
|
||||
file_sys/sdmc_factory.h
|
||||
file_sys/submission_package.cpp
|
||||
file_sys/submission_package.h
|
||||
file_sys/system_archive/data/font_chinese_simplified.cpp
|
||||
file_sys/system_archive/data/font_chinese_simplified.h
|
||||
file_sys/system_archive/data/font_chinese_traditional.cpp
|
||||
file_sys/system_archive/data/font_chinese_traditional.h
|
||||
file_sys/system_archive/data/font_extended_chinese_simplified.cpp
|
||||
file_sys/system_archive/data/font_extended_chinese_simplified.h
|
||||
file_sys/system_archive/data/font_korean.cpp
|
||||
file_sys/system_archive/data/font_korean.h
|
||||
file_sys/system_archive/data/font_nintendo_extended.cpp
|
||||
file_sys/system_archive/data/font_nintendo_extended.h
|
||||
file_sys/system_archive/data/font_standard.cpp
|
||||
file_sys/system_archive/data/font_standard.h
|
||||
file_sys/system_archive/mii_model.cpp
|
||||
file_sys/system_archive/mii_model.h
|
||||
file_sys/system_archive/ng_word.cpp
|
||||
file_sys/system_archive/ng_word.h
|
||||
file_sys/system_archive/shared_font.cpp
|
||||
file_sys/system_archive/shared_font.h
|
||||
file_sys/system_archive/system_archive.cpp
|
||||
file_sys/system_archive/system_archive.h
|
||||
file_sys/system_archive/system_version.cpp
|
||||
@@ -511,7 +525,7 @@ add_library(core STATIC
|
||||
create_target_directory_groups(core)
|
||||
|
||||
target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
|
||||
target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt json-headers mbedtls opus unicorn open_source_archives)
|
||||
target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt json-headers mbedtls opus unicorn)
|
||||
|
||||
if (YUZU_ENABLE_BOXCAT)
|
||||
get_directory_property(OPENSSL_LIBS
|
||||
|
||||
@@ -116,7 +116,7 @@ public:
num_interpreted_instructions = 0;
}
u64 GetTicksRemaining() override {
return std::max(parent.system.CoreTiming().GetDowncount(), 0);
return std::max(parent.system.CoreTiming().GetDowncount(), s64{0});
}
u64 GetCNTPCT() override {
return Timing::CpuCyclesToClockCycles(parent.system.CoreTiming().GetTicks());

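GetDowncount() now returns s64 (see the core_timing.h hunk further down), so the old `std::max(GetDowncount(), 0)` would no longer compile: std::max deduces a single type from both arguments. A minimal sketch of the failure and of the `s64{0}` spelling used in this hunk and in the ARM_Unicorn one below:

```cpp
#include <algorithm>
#include <cstdint>

using s64 = std::int64_t;  // fixed-width alias in the style of common_types.h

s64 GetDowncountStub() {  // stand-in for CoreTiming().GetDowncount(), now s64
    return -25;
}

int main() {
    // std::max deduces one type T from both arguments, so mixing s64 and the
    // int literal 0 does not compile:
    //   auto ticks = std::max(GetDowncountStub(), 0);   // error: deduction conflict
    // Spelling the zero as s64{0} keeps both arguments the same type;
    // std::max<s64>(...) would work equally well.
    const s64 ticks = std::max(GetDowncountStub(), s64{0});
    return ticks == 0 ? 0 : 1;
}
```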
@@ -156,7 +156,7 @@ void ARM_Unicorn::Run() {
if (GDBStub::IsServerEnabled()) {
ExecuteInstructions(std::max(4000000, 0));
} else {
ExecuteInstructions(std::max(system.CoreTiming().GetDowncount(), 0));
ExecuteInstructions(std::max(system.CoreTiming().GetDowncount(), s64{0}));
}
}

@@ -256,6 +256,8 @@ struct System::Impl {
|
||||
is_powered_on = false;
|
||||
exit_lock = false;
|
||||
|
||||
gpu_core->WaitIdle();
|
||||
|
||||
// Shutdown emulation session
|
||||
renderer.reset();
|
||||
GDBStub::Shutdown();
|
||||
@@ -404,6 +406,12 @@ void System::PrepareReschedule() {
|
||||
CurrentCpuCore().PrepareReschedule();
|
||||
}
|
||||
|
||||
void System::PrepareReschedule(const u32 core_index) {
|
||||
if (core_index < GlobalScheduler().CpuCoresCount()) {
|
||||
CpuCore(core_index).PrepareReschedule();
|
||||
}
|
||||
}
|
||||
|
||||
PerfStatsResults System::GetAndResetPerfStats() {
|
||||
return impl->GetAndResetPerfStats();
|
||||
}
|
||||
@@ -444,6 +452,16 @@ const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
|
||||
return CpuCore(core_index).Scheduler();
|
||||
}
|
||||
|
||||
/// Gets the global scheduler
|
||||
Kernel::GlobalScheduler& System::GlobalScheduler() {
|
||||
return impl->kernel.GlobalScheduler();
|
||||
}
|
||||
|
||||
/// Gets the global scheduler
|
||||
const Kernel::GlobalScheduler& System::GlobalScheduler() const {
|
||||
return impl->kernel.GlobalScheduler();
|
||||
}
|
||||
|
||||
Kernel::Process* System::CurrentProcess() {
|
||||
return impl->kernel.CurrentProcess();
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ class VfsFilesystem;
|
||||
} // namespace FileSys
|
||||
|
||||
namespace Kernel {
|
||||
class GlobalScheduler;
|
||||
class KernelCore;
|
||||
class Process;
|
||||
class Scheduler;
|
||||
@@ -184,6 +185,9 @@ public:
|
||||
/// Prepare the core emulation for a reschedule
|
||||
void PrepareReschedule();
|
||||
|
||||
/// Prepare the core emulation for a reschedule
|
||||
void PrepareReschedule(u32 core_index);
|
||||
|
||||
/// Gets and resets core performance statistics
|
||||
PerfStatsResults GetAndResetPerfStats();
|
||||
|
||||
@@ -238,6 +242,12 @@ public:
|
||||
/// Gets the scheduler for the CPU core with the specified index
|
||||
const Kernel::Scheduler& Scheduler(std::size_t core_index) const;
|
||||
|
||||
/// Gets the global scheduler
|
||||
Kernel::GlobalScheduler& GlobalScheduler();
|
||||
|
||||
/// Gets the global scheduler
|
||||
const Kernel::GlobalScheduler& GlobalScheduler() const;
|
||||
|
||||
/// Provides a pointer to the current process
|
||||
Kernel::Process* CurrentProcess();
|
||||
|
||||
|
||||
@@ -52,7 +52,8 @@ bool CpuBarrier::Rendezvous() {
|
||||
|
||||
Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier,
|
||||
std::size_t core_index)
|
||||
: cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} {
|
||||
: cpu_barrier{cpu_barrier}, global_scheduler{system.GlobalScheduler()},
|
||||
core_timing{system.CoreTiming()}, core_index{core_index} {
|
||||
#ifdef ARCHITECTURE_x86_64
|
||||
arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index);
|
||||
#else
|
||||
@@ -60,7 +61,7 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba
|
||||
LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
|
||||
#endif
|
||||
|
||||
scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface);
|
||||
scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface, core_index);
|
||||
}
|
||||
|
||||
Cpu::~Cpu() = default;
|
||||
@@ -81,29 +82,21 @@ void Cpu::RunLoop(bool tight_loop) {
|
||||
return;
|
||||
}
|
||||
|
||||
Reschedule();
|
||||
|
||||
// If we don't have a currently active thread then don't execute instructions,
|
||||
// instead advance to the next event and try to yield to the next thread
|
||||
if (Kernel::GetCurrentThread() == nullptr) {
|
||||
LOG_TRACE(Core, "Core-{} idling", core_index);
|
||||
|
||||
if (IsMainCore()) {
|
||||
// TODO(Subv): Only let CoreTiming idle if all 4 cores are idling.
|
||||
core_timing.Idle();
|
||||
core_timing.Advance();
|
||||
}
|
||||
|
||||
PrepareReschedule();
|
||||
core_timing.Idle();
|
||||
} else {
|
||||
if (IsMainCore()) {
|
||||
core_timing.Advance();
|
||||
}
|
||||
|
||||
if (tight_loop) {
|
||||
arm_interface->Run();
|
||||
} else {
|
||||
arm_interface->Step();
|
||||
}
|
||||
}
|
||||
core_timing.Advance();
|
||||
|
||||
Reschedule();
|
||||
}
|
||||
@@ -114,18 +107,18 @@ void Cpu::SingleStep() {
|
||||
|
||||
void Cpu::PrepareReschedule() {
|
||||
arm_interface->PrepareReschedule();
|
||||
reschedule_pending = true;
|
||||
}
|
||||
|
||||
void Cpu::Reschedule() {
|
||||
if (!reschedule_pending) {
|
||||
return;
|
||||
}
|
||||
|
||||
reschedule_pending = false;
|
||||
// Lock the global kernel mutex when we manipulate the HLE state
|
||||
std::lock_guard lock{HLE::g_hle_lock};
|
||||
scheduler->Reschedule();
|
||||
std::lock_guard lock(HLE::g_hle_lock);
|
||||
|
||||
global_scheduler.SelectThread(core_index);
|
||||
scheduler->TryDoContextSwitch();
|
||||
}
|
||||
|
||||
void Cpu::Shutdown() {
|
||||
scheduler->Shutdown();
|
||||
}
|
||||
|
||||
} // namespace Core
|
||||
|
||||
@@ -12,8 +12,9 @@
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel {
|
||||
class GlobalScheduler;
|
||||
class Scheduler;
|
||||
}
|
||||
} // namespace Kernel
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
@@ -83,6 +84,8 @@ public:
|
||||
return core_index;
|
||||
}
|
||||
|
||||
void Shutdown();
|
||||
|
||||
static std::unique_ptr<ExclusiveMonitor> MakeExclusiveMonitor(std::size_t num_cores);
|
||||
|
||||
private:
|
||||
@@ -90,6 +93,7 @@ private:
|
||||
|
||||
std::unique_ptr<ARM_Interface> arm_interface;
|
||||
CpuBarrier& cpu_barrier;
|
||||
Kernel::GlobalScheduler& global_scheduler;
|
||||
std::unique_ptr<Kernel::Scheduler> scheduler;
|
||||
Timing::CoreTiming& core_timing;
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@

namespace Core::Timing {

constexpr int MAX_SLICE_LENGTH = 20000;
constexpr int MAX_SLICE_LENGTH = 10000;

struct CoreTiming::Event {
s64 time;
@@ -38,10 +38,12 @@ CoreTiming::CoreTiming() = default;
|
||||
CoreTiming::~CoreTiming() = default;
|
||||
|
||||
void CoreTiming::Initialize() {
|
||||
downcount = MAX_SLICE_LENGTH;
|
||||
downcounts.fill(MAX_SLICE_LENGTH);
|
||||
time_slice.fill(MAX_SLICE_LENGTH);
|
||||
slice_length = MAX_SLICE_LENGTH;
|
||||
global_timer = 0;
|
||||
idled_cycles = 0;
|
||||
current_context = 0;
|
||||
|
||||
// The time between CoreTiming being initialized and the first call to Advance() is considered
|
||||
// the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
|
||||
@@ -110,7 +112,7 @@ void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
|
||||
u64 CoreTiming::GetTicks() const {
|
||||
u64 ticks = static_cast<u64>(global_timer);
|
||||
if (!is_global_timer_sane) {
|
||||
ticks += slice_length - downcount;
|
||||
ticks += accumulated_ticks;
|
||||
}
|
||||
return ticks;
|
||||
}
|
||||
@@ -120,7 +122,8 @@ u64 CoreTiming::GetIdleTicks() const {
|
||||
}
|
||||
|
||||
void CoreTiming::AddTicks(u64 ticks) {
|
||||
downcount -= static_cast<int>(ticks);
|
||||
accumulated_ticks += ticks;
|
||||
downcounts[current_context] -= static_cast<s64>(ticks);
|
||||
}
|
||||
|
||||
void CoreTiming::ClearPendingEvents() {
|
||||
@@ -141,22 +144,35 @@ void CoreTiming::RemoveEvent(const EventType* event_type) {
|
||||
|
||||
void CoreTiming::ForceExceptionCheck(s64 cycles) {
|
||||
cycles = std::max<s64>(0, cycles);
|
||||
if (downcount <= cycles) {
|
||||
if (downcounts[current_context] <= cycles) {
|
||||
return;
|
||||
}
|
||||
|
||||
// downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int
|
||||
// here. Account for cycles already executed by adjusting the g.slice_length
|
||||
slice_length -= downcount - static_cast<int>(cycles);
|
||||
downcount = static_cast<int>(cycles);
|
||||
downcounts[current_context] = static_cast<int>(cycles);
|
||||
}
|
||||
|
||||
std::optional<u64> CoreTiming::NextAvailableCore(const s64 needed_ticks) const {
|
||||
const u64 original_context = current_context;
|
||||
u64 next_context = (original_context + 1) % num_cpu_cores;
|
||||
while (next_context != original_context) {
|
||||
if (time_slice[next_context] >= needed_ticks) {
|
||||
return {next_context};
|
||||
} else if (time_slice[next_context] >= 0) {
|
||||
return std::nullopt;
|
||||
}
|
||||
next_context = (next_context + 1) % num_cpu_cores;
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
void CoreTiming::Advance() {
|
||||
std::unique_lock<std::mutex> guard(inner_mutex);
|
||||
|
||||
const int cycles_executed = slice_length - downcount;
|
||||
const u64 cycles_executed = accumulated_ticks;
|
||||
time_slice[current_context] = std::max<s64>(0, time_slice[current_context] - accumulated_ticks);
|
||||
global_timer += cycles_executed;
|
||||
slice_length = MAX_SLICE_LENGTH;
|
||||
|
||||
is_global_timer_sane = true;
|
||||
|
||||
@@ -173,24 +189,46 @@ void CoreTiming::Advance() {
|
||||
|
||||
// Still events left (scheduled in the future)
|
||||
if (!event_queue.empty()) {
|
||||
slice_length = static_cast<int>(
|
||||
std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH));
|
||||
const s64 needed_ticks =
|
||||
std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH);
|
||||
const auto next_core = NextAvailableCore(needed_ticks);
|
||||
if (next_core) {
|
||||
downcounts[*next_core] = needed_ticks;
|
||||
}
|
||||
}
|
||||
|
||||
downcount = slice_length;
|
||||
accumulated_ticks = 0;
|
||||
|
||||
downcounts[current_context] = time_slice[current_context];
|
||||
}

void CoreTiming::ResetRun() {
downcounts.fill(MAX_SLICE_LENGTH);
time_slice.fill(MAX_SLICE_LENGTH);
current_context = 0;
// Still events left (scheduled in the future)
if (!event_queue.empty()) {
const s64 needed_ticks =
std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH);
downcounts[current_context] = needed_ticks;
}

is_global_timer_sane = false;
accumulated_ticks = 0;
}

void CoreTiming::Idle() {
idled_cycles += downcount;
downcount = 0;
accumulated_ticks += downcounts[current_context];
idled_cycles += downcounts[current_context];
downcounts[current_context] = 0;
}

std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE};
}

int CoreTiming::GetDowncount() const {
return downcount;
s64 CoreTiming::GetDowncount() const {
return downcounts[current_context];
}

} // namespace Core::Timing

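The CoreTiming changes above replace the single downcount/slice_length pair with per-core arrays indexed by the currently running context. A condensed, self-contained sketch of that bookkeeping (the class and member names mirror the diff, but everything else is simplified; the real class also owns the event queue, locking, and callbacks):

```cpp
#include <algorithm>
#include <array>
#include <cstdint>

class PerCoreTimingSketch {
public:
    static constexpr std::uint64_t num_cpu_cores = 4;
    static constexpr std::int64_t MAX_SLICE_LENGTH = 10000;

    void ResetRun() {
        downcounts.fill(MAX_SLICE_LENGTH);
        time_slice.fill(MAX_SLICE_LENGTH);
        current_context = 0;
        accumulated_ticks = 0;
    }

    void SwitchContext(std::uint64_t new_context) { current_context = new_context; }
    bool CanCurrentContextRun() const { return time_slice[current_context] > 0; }

    // Charge executed guest cycles to the running core.
    void AddTicks(std::uint64_t ticks) {
        accumulated_ticks += ticks;
        downcounts[current_context] -= static_cast<std::int64_t>(ticks);
    }

    // An idle core burns the rest of its downcount instead of executing.
    void Idle() {
        accumulated_ticks += downcounts[current_context];
        downcounts[current_context] = 0;
    }

    // Consume the core's time slice by whatever was actually executed.
    void Advance() {
        time_slice[current_context] =
            std::max<std::int64_t>(0, time_slice[current_context] -
                                          static_cast<std::int64_t>(accumulated_ticks));
        global_timer += accumulated_ticks;
        accumulated_ticks = 0;
        downcounts[current_context] = time_slice[current_context];
    }

    std::int64_t GetDowncount() const { return downcounts[current_context]; }

private:
    std::array<std::int64_t, num_cpu_cores> downcounts{};
    std::array<std::int64_t, num_cpu_cores> time_slice{};
    std::uint64_t current_context = 0;
    std::uint64_t accumulated_ticks = 0;
    std::uint64_t global_timer = 0;
};
```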
@@ -7,6 +7,7 @@
|
||||
#include <chrono>
|
||||
#include <functional>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
@@ -104,7 +105,19 @@ public:
|
||||
|
||||
std::chrono::microseconds GetGlobalTimeUs() const;
|
||||
|
||||
int GetDowncount() const;
|
||||
void ResetRun();
|
||||
|
||||
s64 GetDowncount() const;
|
||||
|
||||
void SwitchContext(u64 new_context) {
|
||||
current_context = new_context;
|
||||
}
|
||||
|
||||
bool CanCurrentContextRun() const {
|
||||
return time_slice[current_context] > 0;
|
||||
}
|
||||
|
||||
std::optional<u64> NextAvailableCore(const s64 needed_ticks) const;
|
||||
|
||||
private:
|
||||
struct Event;
|
||||
@@ -112,10 +125,16 @@ private:
|
||||
/// Clear all pending events. This should ONLY be done on exit.
|
||||
void ClearPendingEvents();
|
||||
|
||||
static constexpr u64 num_cpu_cores = 4;
|
||||
|
||||
s64 global_timer = 0;
|
||||
s64 idled_cycles = 0;
|
||||
int slice_length = 0;
|
||||
int downcount = 0;
|
||||
s64 slice_length = 0;
|
||||
u64 accumulated_ticks = 0;
|
||||
std::array<s64, num_cpu_cores> downcounts{};
|
||||
// Slice of time assigned to each core per run.
|
||||
std::array<s64, num_cpu_cores> time_slice{};
|
||||
u64 current_context = 0;
|
||||
|
||||
// Are we in a function that has been called from Advance()
|
||||
// If events are scheduled from a function that gets called from Advance(),
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
#include "core/arm/exclusive_monitor.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_cpu.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/cpu_core_manager.h"
|
||||
#include "core/gdbstub/gdbstub.h"
|
||||
#include "core/settings.h"
|
||||
@@ -57,6 +58,7 @@ void CpuCoreManager::Shutdown() {
|
||||
|
||||
thread_to_cpu.clear();
|
||||
for (auto& cpu_core : cores) {
|
||||
cpu_core->Shutdown();
|
||||
cpu_core.reset();
|
||||
}
|
||||
|
||||
@@ -122,13 +124,19 @@ void CpuCoreManager::RunLoop(bool tight_loop) {
}
}

for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) {
cores[active_core]->RunLoop(tight_loop);
if (Settings::values.use_multi_core) {
// Cores 1-3 are run on other threads in this mode
break;
auto& core_timing = system.CoreTiming();
core_timing.ResetRun();
bool keep_running{};
do {
keep_running = false;
for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) {
core_timing.SwitchContext(active_core);
if (core_timing.CanCurrentContextRun()) {
cores[active_core]->RunLoop(tight_loop);
}
keep_running |= core_timing.CanCurrentContextRun();
}
}
} while (keep_running);

if (GDBStub::IsServerEnabled()) {
GDBStub::SetCpuStepFlag(false);

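The dispatcher now round-robins all four contexts until every per-core time slice is exhausted. A hedged usage sketch that drives the PerCoreTimingSketch class from the earlier example the same way; RunCore-style execution is faked with a fixed AddTicks call standing in for `cores[active_core]->RunLoop(tight_loop)`:

```cpp
#include <cstdint>

// Assumes the PerCoreTimingSketch from the CoreTiming example above.
void RunAllCoresOnce(PerCoreTimingSketch& core_timing) {
    core_timing.ResetRun();
    bool keep_running = false;
    do {
        keep_running = false;
        for (std::uint64_t core = 0; core < PerCoreTimingSketch::num_cpu_cores; ++core) {
            core_timing.SwitchContext(core);
            if (core_timing.CanCurrentContextRun()) {
                // Stand-in for cores[core]->RunLoop(tight_loop): pretend the
                // guest executed some cycles, then settle the slice.
                core_timing.AddTicks(4000);
                core_timing.Advance();
            }
            keep_running |= core_timing.CanCurrentContextRun();
        }
    } while (keep_running);
}
```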
13592
src/core/file_sys/system_archive/data/font_chinese_simplified.cpp
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,13 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
namespace FileSys::SystemArchive::SharedFontData {
|
||||
|
||||
extern const std::array<unsigned char, 217276> FONT_CHINESE_SIMPLIFIED;
|
||||
|
||||
} // namespace FileSys::SystemArchive::SharedFontData
|
||||
13902
src/core/file_sys/system_archive/data/font_chinese_traditional.cpp
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,13 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
namespace FileSys::SystemArchive::SharedFontData {
|
||||
|
||||
extern const std::array<unsigned char, 222236> FONT_CHINESE_TRADITIONAL;
|
||||
|
||||
} // namespace FileSys::SystemArchive::SharedFontData
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,13 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
namespace FileSys::SystemArchive::SharedFontData {
|
||||
|
||||
extern const std::array<unsigned char, 293516> FONT_EXTENDED_CHINESE_SIMPLIFIED;
|
||||
|
||||
} // namespace FileSys::SystemArchive::SharedFontData
|
||||
13592
src/core/file_sys/system_archive/data/font_korean.cpp
Normal file
File diff suppressed because it is too large
13
src/core/file_sys/system_archive/data/font_korean.h
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
namespace FileSys::SystemArchive::SharedFontData {
|
||||
|
||||
extern const std::array<unsigned char, 217276> FONT_KOREAN;
|
||||
|
||||
} // namespace FileSys::SystemArchive::SharedFontData
|
||||
196
src/core/file_sys/system_archive/data/font_nintendo_extended.cpp
Normal file
@@ -0,0 +1,196 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/file_sys/system_archive/data/font_nintendo_extended.h"
|
||||
|
||||
namespace FileSys::SystemArchive::SharedFontData {
|
||||
|
||||
const std::array<unsigned char, 2932> FONT_NINTENDO_EXTENDED{{
|
||||
0x00, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x80, 0x00, 0x03, 0x00, 0x70, 0x44, 0x53, 0x49, 0x47,
|
||||
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x0b, 0x6c, 0x00, 0x00, 0x00, 0x08, 0x4f, 0x53, 0x2f, 0x32,
|
||||
0x33, 0x86, 0x1d, 0x9b, 0x00, 0x00, 0x01, 0x78, 0x00, 0x00, 0x00, 0x60, 0x63, 0x6d, 0x61, 0x70,
|
||||
0xc2, 0x06, 0x20, 0xde, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x63, 0x76, 0x74, 0x20,
|
||||
0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x04, 0x2c, 0x00, 0x00, 0x00, 0x06, 0x66, 0x70, 0x67, 0x6d,
|
||||
0x06, 0x59, 0x9c, 0x37, 0x00, 0x00, 0x02, 0xa0, 0x00, 0x00, 0x01, 0x73, 0x67, 0x61, 0x73, 0x70,
|
||||
0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x0b, 0x64, 0x00, 0x00, 0x00, 0x08, 0x67, 0x6c, 0x79, 0x66,
|
||||
0x10, 0x31, 0x88, 0x00, 0x00, 0x00, 0x04, 0x34, 0x00, 0x00, 0x04, 0x64, 0x68, 0x65, 0x61, 0x64,
|
||||
0x15, 0x9d, 0xef, 0x91, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, 0x36, 0x68, 0x68, 0x65, 0x61,
|
||||
0x09, 0x60, 0x03, 0x71, 0x00, 0x00, 0x01, 0x34, 0x00, 0x00, 0x00, 0x24, 0x68, 0x6d, 0x74, 0x78,
|
||||
0x0d, 0x2e, 0x03, 0xa7, 0x00, 0x00, 0x01, 0xd8, 0x00, 0x00, 0x00, 0x26, 0x6c, 0x6f, 0x63, 0x61,
|
||||
0x05, 0xc0, 0x04, 0x6c, 0x00, 0x00, 0x08, 0x98, 0x00, 0x00, 0x00, 0x1e, 0x6d, 0x61, 0x78, 0x70,
|
||||
0x02, 0x1c, 0x00, 0x5f, 0x00, 0x00, 0x01, 0x58, 0x00, 0x00, 0x00, 0x20, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x7c, 0xe0, 0x84, 0x5c, 0x00, 0x00, 0x08, 0xb8, 0x00, 0x00, 0x02, 0x09, 0x70, 0x6f, 0x73, 0x74,
|
||||
0x47, 0x4e, 0x74, 0x19, 0x00, 0x00, 0x0a, 0xc4, 0x00, 0x00, 0x00, 0x9e, 0x70, 0x72, 0x65, 0x70,
|
||||
0x1c, 0xfc, 0x7d, 0x9c, 0x00, 0x00, 0x04, 0x14, 0x00, 0x00, 0x00, 0x16, 0x00, 0x01, 0x00, 0x00,
|
||||
0x00, 0x01, 0x00, 0x00, 0x7c, 0xc7, 0xb1, 0x63, 0x5f, 0x0f, 0x3c, 0xf5, 0x00, 0x1b, 0x03, 0xe8,
|
||||
0x00, 0x00, 0x00, 0x00, 0xd9, 0x44, 0x2f, 0x5d, 0x00, 0x00, 0x00, 0x00, 0xd9, 0x45, 0x7b, 0x69,
|
||||
0x00, 0x00, 0x00, 0x00, 0x03, 0xe6, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x06, 0x00, 0x02, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x03, 0x84, 0xff, 0x83, 0x01, 0xf4, 0x03, 0xe8,
|
||||
0x00, 0x00, 0x00, 0x00, 0x03, 0xe6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x5e,
|
||||
0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00,
|
||||
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x03, 0x74, 0x01, 0x90, 0x00, 0x05,
|
||||
0x00, 0x04, 0x00, 0xcd, 0x00, 0xcd, 0x00, 0x00, 0x01, 0x1f, 0x00, 0xcd, 0x00, 0xcd, 0x00, 0x00,
|
||||
0x03, 0xc3, 0x00, 0x66, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x00, 0xc0, 0x00, 0x00, 0xe0, 0xe9, 0x03, 0x84, 0xff, 0x83,
|
||||
0x01, 0xf4, 0x02, 0xee, 0x00, 0xfa, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8,
|
||||
0x02, 0xbc, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0xfa, 0x00, 0x00, 0x00, 0xfa, 0x00, 0x00, 0x03, 0xe8, 0x00, 0xeb, 0x01, 0x21, 0x00, 0xff,
|
||||
0x00, 0xff, 0x01, 0x3d, 0x01, 0x17, 0x00, 0x42, 0x00, 0x1c, 0x00, 0x3e, 0x00, 0x17, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x68, 0x00, 0x01, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x1c, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x68, 0x00, 0x06, 0x00, 0x4c,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x04, 0x00, 0x38, 0x00, 0x00, 0x00, 0x0a,
|
||||
0x00, 0x08, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x20, 0xe0, 0xe9, 0xff, 0xff,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x20, 0xe0, 0xe0, 0xff, 0xff, 0x00, 0x01, 0xff, 0xf5,
|
||||
0xff, 0xe3, 0x1f, 0x24, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0xb8, 0x00, 0x00, 0x2c, 0x4b, 0xb8, 0x00, 0x09, 0x50, 0x58, 0xb1, 0x01, 0x01, 0x8e, 0x59, 0xb8,
|
||||
0x01, 0xff, 0x85, 0xb8, 0x00, 0x44, 0x1d, 0xb9, 0x00, 0x09, 0x00, 0x03, 0x5f, 0x5e, 0x2d, 0xb8,
|
||||
0x00, 0x01, 0x2c, 0x20, 0x20, 0x45, 0x69, 0x44, 0xb0, 0x01, 0x60, 0x2d, 0xb8, 0x00, 0x02, 0x2c,
|
||||
0xb8, 0x00, 0x01, 0x2a, 0x21, 0x2d, 0xb8, 0x00, 0x03, 0x2c, 0x20, 0x46, 0xb0, 0x03, 0x25, 0x46,
|
||||
0x52, 0x58, 0x23, 0x59, 0x20, 0x8a, 0x20, 0x8a, 0x49, 0x64, 0x8a, 0x20, 0x46, 0x20, 0x68, 0x61,
|
||||
0x64, 0xb0, 0x04, 0x25, 0x46, 0x20, 0x68, 0x61, 0x64, 0x52, 0x58, 0x23, 0x65, 0x8a, 0x59, 0x2f,
|
||||
0x20, 0xb0, 0x00, 0x53, 0x58, 0x69, 0x20, 0xb0, 0x00, 0x54, 0x58, 0x21, 0xb0, 0x40, 0x59, 0x1b,
|
||||
0x69, 0x20, 0xb0, 0x00, 0x54, 0x58, 0x21, 0xb0, 0x40, 0x65, 0x59, 0x59, 0x3a, 0x2d, 0xb8, 0x00,
|
||||
0x04, 0x2c, 0x20, 0x46, 0xb0, 0x04, 0x25, 0x46, 0x52, 0x58, 0x23, 0x8a, 0x59, 0x20, 0x46, 0x20,
|
||||
0x6a, 0x61, 0x64, 0xb0, 0x04, 0x25, 0x46, 0x20, 0x6a, 0x61, 0x64, 0x52, 0x58, 0x23, 0x8a, 0x59,
|
||||
0x2f, 0xfd, 0x2d, 0xb8, 0x00, 0x05, 0x2c, 0x4b, 0x20, 0xb0, 0x03, 0x26, 0x50, 0x58, 0x51, 0x58,
|
||||
0xb0, 0x80, 0x44, 0x1b, 0xb0, 0x40, 0x44, 0x59, 0x1b, 0x21, 0x21, 0x20, 0x45, 0xb0, 0xc0, 0x50,
|
||||
0x58, 0xb0, 0xc0, 0x44, 0x1b, 0x21, 0x59, 0x59, 0x2d, 0xb8, 0x00, 0x06, 0x2c, 0x20, 0x20, 0x45,
|
||||
0x69, 0x44, 0xb0, 0x01, 0x60, 0x20, 0x20, 0x45, 0x7d, 0x69, 0x18, 0x44, 0xb0, 0x01, 0x60, 0x2d,
|
||||
0xb8, 0x00, 0x07, 0x2c, 0xb8, 0x00, 0x06, 0x2a, 0x2d, 0xb8, 0x00, 0x08, 0x2c, 0x4b, 0x20, 0xb0,
|
||||
0x03, 0x26, 0x53, 0x58, 0xb0, 0x40, 0x1b, 0xb0, 0x00, 0x59, 0x8a, 0x8a, 0x20, 0xb0, 0x03, 0x26,
|
||||
0x53, 0x58, 0x23, 0x21, 0xb0, 0x80, 0x8a, 0x8a, 0x1b, 0x8a, 0x23, 0x59, 0x20, 0xb0, 0x03, 0x26,
|
||||
0x53, 0x58, 0x23, 0x21, 0xb8, 0x00, 0xc0, 0x8a, 0x8a, 0x1b, 0x8a, 0x23, 0x59, 0x20, 0xb0, 0x03,
|
||||
0x26, 0x53, 0x58, 0x23, 0x21, 0xb8, 0x01, 0x00, 0x8a, 0x8a, 0x1b, 0x8a, 0x23, 0x59, 0x20, 0xb0,
|
||||
0x03, 0x26, 0x53, 0x58, 0x23, 0x21, 0xb8, 0x01, 0x40, 0x8a, 0x8a, 0x1b, 0x8a, 0x23, 0x59, 0x20,
|
||||
0xb8, 0x00, 0x03, 0x26, 0x53, 0x58, 0xb0, 0x03, 0x25, 0x45, 0xb8, 0x01, 0x80, 0x50, 0x58, 0x23,
|
||||
0x21, 0xb8, 0x01, 0x80, 0x23, 0x21, 0x1b, 0xb0, 0x03, 0x25, 0x45, 0x23, 0x21, 0x23, 0x21, 0x59,
|
||||
0x1b, 0x21, 0x59, 0x44, 0x2d, 0xb8, 0x00, 0x09, 0x2c, 0x4b, 0x53, 0x58, 0x45, 0x44, 0x1b, 0x21,
|
||||
0x21, 0x59, 0x2d, 0x00, 0xb8, 0x00, 0x00, 0x2b, 0x00, 0xba, 0x00, 0x01, 0x00, 0x01, 0x00, 0x07,
|
||||
0x2b, 0xb8, 0x00, 0x00, 0x20, 0x45, 0x7d, 0x69, 0x18, 0x44, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x03, 0xe6, 0x03, 0xe8, 0x00, 0x06,
|
||||
0x00, 0x00, 0x35, 0x01, 0x33, 0x15, 0x01, 0x23, 0x35, 0x03, 0x52, 0x94, 0xfc, 0xa6, 0x8c, 0x90,
|
||||
0x03, 0x58, 0x86, 0xfc, 0xa0, 0x8e, 0x00, 0x00, 0x00, 0x02, 0x00, 0xeb, 0x00, 0xcc, 0x02, 0xfb,
|
||||
0x03, 0x1e, 0x00, 0x08, 0x00, 0x0f, 0x00, 0x00, 0x01, 0x33, 0x13, 0x23, 0x27, 0x23, 0x07, 0x23,
|
||||
0x13, 0x17, 0x07, 0x06, 0x15, 0x33, 0x27, 0x07, 0x01, 0xbc, 0x6d, 0xd2, 0x7c, 0x26, 0xcc, 0x26,
|
||||
0x7c, 0xd1, 0x35, 0x40, 0x02, 0x89, 0x45, 0x02, 0x03, 0x1e, 0xfd, 0xae, 0x77, 0x77, 0x02, 0x52,
|
||||
0x9b, 0xcc, 0x08, 0x04, 0xda, 0x02, 0x00, 0x00, 0x00, 0x03, 0x01, 0x21, 0x00, 0xcc, 0x02, 0xc5,
|
||||
0x03, 0x1e, 0x00, 0x15, 0x00, 0x1f, 0x00, 0x2b, 0x00, 0x00, 0x25, 0x11, 0x33, 0x32, 0x1e, 0x02,
|
||||
0x15, 0x14, 0x0e, 0x02, 0x07, 0x1e, 0x01, 0x15, 0x14, 0x0e, 0x02, 0x2b, 0x01, 0x13, 0x33, 0x32,
|
||||
0x36, 0x35, 0x34, 0x26, 0x2b, 0x01, 0x1d, 0x01, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x26, 0x2b,
|
||||
0x01, 0x15, 0x01, 0x21, 0xea, 0x25, 0x3f, 0x2e, 0x1a, 0x0e, 0x15, 0x1b, 0x0e, 0x2d, 0x2d, 0x1a,
|
||||
0x2e, 0x3f, 0x25, 0xf8, 0x76, 0x62, 0x20, 0x2a, 0x28, 0x22, 0x62, 0x76, 0x10, 0x18, 0x11, 0x09,
|
||||
0x22, 0x22, 0x74, 0xcc, 0x02, 0x52, 0x18, 0x2b, 0x3c, 0x24, 0x1d, 0x1f, 0x17, 0x17, 0x14, 0x0f,
|
||||
0x48, 0x2f, 0x24, 0x3f, 0x2e, 0x1a, 0x01, 0x5b, 0x29, 0x20, 0x20, 0x2b, 0x94, 0xf8, 0x0e, 0x16,
|
||||
0x1c, 0x0e, 0x1f, 0x31, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xff, 0x00, 0xcc, 0x02, 0xe7,
|
||||
0x03, 0x1e, 0x00, 0x0c, 0x00, 0x00, 0x01, 0x33, 0x17, 0x37, 0x33, 0x03, 0x13, 0x23, 0x27, 0x07,
|
||||
0x23, 0x13, 0x03, 0x01, 0x04, 0x86, 0x69, 0x69, 0x86, 0xa3, 0xa8, 0x88, 0x6c, 0x6c, 0x88, 0xa8,
|
||||
0xa3, 0x03, 0x1e, 0xcb, 0xcb, 0xfe, 0xda, 0xfe, 0xd4, 0xcf, 0xcf, 0x01, 0x2c, 0x01, 0x26, 0x00,
|
||||
0x00, 0x01, 0x00, 0xff, 0x00, 0xcc, 0x02, 0xe7, 0x03, 0x1e, 0x00, 0x0f, 0x00, 0x00, 0x01, 0x03,
|
||||
0x33, 0x17, 0x32, 0x15, 0x1e, 0x01, 0x15, 0x1b, 0x01, 0x33, 0x03, 0x15, 0x23, 0x35, 0x01, 0xb8,
|
||||
0xb9, 0x7e, 0x01, 0x01, 0x01, 0x03, 0x70, 0x75, 0x7f, 0xb9, 0x76, 0x01, 0xa3, 0x01, 0x7b, 0x01,
|
||||
0x01, 0x01, 0x05, 0x02, 0xff, 0x00, 0x01, 0x0a, 0xfe, 0x85, 0xd7, 0xd7, 0x00, 0x01, 0x01, 0x3d,
|
||||
0x00, 0xcc, 0x02, 0xa9, 0x03, 0x1e, 0x00, 0x06, 0x00, 0x00, 0x25, 0x11, 0x33, 0x11, 0x33, 0x15,
|
||||
0x21, 0x01, 0x3d, 0x75, 0xf7, 0xfe, 0x94, 0xcc, 0x02, 0x52, 0xfe, 0x10, 0x62, 0x00, 0x00, 0x00,
|
||||
0x00, 0x02, 0x01, 0x17, 0x00, 0xbc, 0x02, 0xcf, 0x03, 0x0e, 0x00, 0x15, 0x00, 0x21, 0x00, 0x00,
|
||||
0x25, 0x11, 0x33, 0x32, 0x1e, 0x02, 0x1d, 0x01, 0x0e, 0x03, 0x1d, 0x01, 0x17, 0x15, 0x23, 0x27,
|
||||
0x23, 0x15, 0x23, 0x13, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x26, 0x2b, 0x01, 0x15, 0x01, 0x17,
|
||||
0xf4, 0x27, 0x40, 0x2e, 0x19, 0x01, 0x1f, 0x24, 0x1e, 0x78, 0x7d, 0x6a, 0x5c, 0x75, 0x76, 0x72,
|
||||
0x12, 0x19, 0x11, 0x08, 0x26, 0x26, 0x6a, 0xbc, 0x02, 0x52, 0x1d, 0x31, 0x42, 0x25, 0x16, 0x18,
|
||||
0x32, 0x2a, 0x1b, 0x02, 0x01, 0xef, 0x06, 0xd7, 0xd7, 0x01, 0x3f, 0x10, 0x1a, 0x1e, 0x0f, 0x23,
|
||||
0x36, 0xb0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x42, 0x00, 0xbc, 0x03, 0xa4, 0x03, 0x0e, 0x00, 0x0a,
|
||||
0x00, 0x11, 0x00, 0x00, 0x13, 0x35, 0x21, 0x15, 0x01, 0x21, 0x15, 0x21, 0x35, 0x01, 0x21, 0x01,
|
||||
0x11, 0x33, 0x11, 0x33, 0x15, 0x21, 0x42, 0x01, 0xa7, 0xfe, 0xeb, 0x01, 0x1b, 0xfe, 0x53, 0x01,
|
||||
0x15, 0xfe, 0xeb, 0x01, 0xf7, 0x75, 0xf6, 0xfe, 0x95, 0x02, 0xac, 0x62, 0x45, 0xfe, 0x55, 0x62,
|
||||
0x47, 0x01, 0xa9, 0xfe, 0x10, 0x02, 0x52, 0xfe, 0x10, 0x62, 0x00, 0x00, 0x00, 0x03, 0x00, 0x1c,
|
||||
0x00, 0xbc, 0x03, 0xca, 0x03, 0x0e, 0x00, 0x0a, 0x00, 0x21, 0x00, 0x2f, 0x00, 0x00, 0x13, 0x35,
|
||||
0x21, 0x15, 0x01, 0x21, 0x15, 0x21, 0x35, 0x01, 0x21, 0x01, 0x11, 0x33, 0x32, 0x1e, 0x02, 0x15,
|
||||
0x14, 0x06, 0x07, 0x0e, 0x03, 0x15, 0x17, 0x15, 0x23, 0x27, 0x23, 0x15, 0x23, 0x13, 0x33, 0x32,
|
||||
0x3e, 0x02, 0x35, 0x34, 0x2e, 0x02, 0x2b, 0x01, 0x15, 0x1c, 0x01, 0xa7, 0xfe, 0xeb, 0x01, 0x1b,
|
||||
0xfe, 0x53, 0x01, 0x15, 0xfe, 0xeb, 0x01, 0xf7, 0xf3, 0x27, 0x41, 0x2d, 0x19, 0x1c, 0x20, 0x01,
|
||||
0x0d, 0x0e, 0x0a, 0x78, 0x7d, 0x69, 0x5c, 0x75, 0x76, 0x71, 0x11, 0x1a, 0x12, 0x09, 0x0a, 0x14,
|
||||
0x1d, 0x13, 0x69, 0x02, 0xac, 0x62, 0x45, 0xfe, 0x55, 0x62, 0x47, 0x01, 0xa9, 0xfe, 0x10, 0x02,
|
||||
0x52, 0x1d, 0x31, 0x42, 0x25, 0x2b, 0x44, 0x1d, 0x01, 0x08, 0x09, 0x07, 0x01, 0xf1, 0x06, 0xd7,
|
||||
0xd7, 0x01, 0x3f, 0x11, 0x19, 0x1f, 0x0e, 0x11, 0x20, 0x19, 0x0f, 0xb0, 0x00, 0x02, 0x00, 0x3e,
|
||||
0x00, 0xb3, 0x03, 0xa8, 0x03, 0x17, 0x00, 0x3a, 0x00, 0x41, 0x00, 0x00, 0x13, 0x34, 0x3e, 0x02,
|
||||
0x33, 0x32, 0x1e, 0x02, 0x15, 0x23, 0x27, 0x34, 0x27, 0x2e, 0x01, 0x23, 0x22, 0x0e, 0x02, 0x15,
|
||||
0x14, 0x16, 0x15, 0x1e, 0x05, 0x15, 0x14, 0x0e, 0x02, 0x23, 0x22, 0x2e, 0x02, 0x35, 0x33, 0x1e,
|
||||
0x01, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x2e, 0x04, 0x35, 0x01, 0x11, 0x33, 0x11, 0x33, 0x15,
|
||||
0x21, 0x50, 0x24, 0x3b, 0x4a, 0x27, 0x28, 0x4b, 0x39, 0x22, 0x73, 0x01, 0x01, 0x08, 0x2b, 0x29,
|
||||
0x10, 0x20, 0x19, 0x0f, 0x01, 0x0b, 0x35, 0x41, 0x46, 0x3b, 0x25, 0x23, 0x3a, 0x4b, 0x27, 0x2b,
|
||||
0x50, 0x3f, 0x26, 0x74, 0x05, 0x34, 0x33, 0x10, 0x20, 0x1a, 0x11, 0x2c, 0x42, 0x4d, 0x42, 0x2c,
|
||||
0x01, 0xef, 0x73, 0xf6, 0xfe, 0x97, 0x02, 0x70, 0x2a, 0x3f, 0x2a, 0x14, 0x18, 0x2e, 0x44, 0x2c,
|
||||
0x02, 0x03, 0x01, 0x27, 0x27, 0x07, 0x10, 0x1a, 0x12, 0x02, 0x0b, 0x02, 0x1f, 0x22, 0x19, 0x17,
|
||||
0x27, 0x3f, 0x34, 0x2c, 0x3e, 0x28, 0x13, 0x1a, 0x32, 0x48, 0x2e, 0x30, 0x30, 0x06, 0x0f, 0x1a,
|
||||
0x13, 0x21, 0x27, 0x1e, 0x1b, 0x29, 0x3e, 0x31, 0xfe, 0x4c, 0x02, 0x53, 0xfe, 0x10, 0x63, 0x00,
|
||||
0x00, 0x03, 0x00, 0x17, 0x00, 0xb3, 0x03, 0xce, 0x03, 0x17, 0x00, 0x38, 0x00, 0x4f, 0x00, 0x5d,
|
||||
0x00, 0x00, 0x13, 0x34, 0x3e, 0x02, 0x33, 0x32, 0x1e, 0x02, 0x15, 0x23, 0x27, 0x34, 0x23, 0x2e,
|
||||
0x01, 0x23, 0x22, 0x0e, 0x02, 0x15, 0x14, 0x1e, 0x04, 0x15, 0x14, 0x0e, 0x02, 0x23, 0x22, 0x2e,
|
||||
0x02, 0x35, 0x33, 0x1e, 0x01, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x26, 0x27, 0x2e, 0x03, 0x35,
|
||||
0x01, 0x11, 0x33, 0x32, 0x1e, 0x02, 0x15, 0x14, 0x06, 0x07, 0x30, 0x0e, 0x02, 0x31, 0x17, 0x15,
|
||||
0x23, 0x27, 0x23, 0x15, 0x23, 0x13, 0x33, 0x32, 0x3e, 0x02, 0x35, 0x34, 0x2e, 0x02, 0x2b, 0x01,
|
||||
0x15, 0x2a, 0x24, 0x3a, 0x4a, 0x26, 0x29, 0x4b, 0x39, 0x23, 0x73, 0x01, 0x01, 0x08, 0x2a, 0x2a,
|
||||
0x10, 0x1f, 0x1a, 0x10, 0x2c, 0x42, 0x4d, 0x42, 0x2c, 0x23, 0x39, 0x4b, 0x27, 0x2b, 0x51, 0x3f,
|
||||
0x27, 0x75, 0x05, 0x34, 0x33, 0x10, 0x20, 0x1a, 0x10, 0x1f, 0x1c, 0x25, 0x53, 0x47, 0x2e, 0x01,
|
||||
0xed, 0xf3, 0x27, 0x41, 0x2d, 0x19, 0x1c, 0x20, 0x0c, 0x0e, 0x0c, 0x78, 0x7d, 0x68, 0x5d, 0x75,
|
||||
0x76, 0x71, 0x11, 0x1a, 0x12, 0x09, 0x0a, 0x14, 0x1d, 0x13, 0x69, 0x02, 0x71, 0x2a, 0x3e, 0x2a,
|
||||
0x14, 0x18, 0x2e, 0x44, 0x2c, 0x02, 0x02, 0x27, 0x29, 0x07, 0x11, 0x1a, 0x12, 0x1d, 0x24, 0x1c,
|
||||
0x1d, 0x2b, 0x40, 0x32, 0x2c, 0x3f, 0x29, 0x13, 0x1a, 0x31, 0x49, 0x2e, 0x30, 0x30, 0x06, 0x0f,
|
||||
0x19, 0x13, 0x1e, 0x22, 0x0b, 0x0e, 0x20, 0x2f, 0x43, 0x30, 0xfe, 0x4b, 0x02, 0x52, 0x1d, 0x32,
|
||||
0x42, 0x25, 0x2c, 0x42, 0x1d, 0x08, 0x0a, 0x08, 0xf1, 0x06, 0xd7, 0xd7, 0x01, 0x3f, 0x11, 0x19,
|
||||
0x1f, 0x0e, 0x11, 0x20, 0x19, 0x0f, 0xb0, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x12, 0x00, 0x12,
|
||||
0x00, 0x12, 0x00, 0x32, 0x00, 0x72, 0x00, 0x8e, 0x00, 0xac, 0x00, 0xbe, 0x00, 0xf0, 0x01, 0x14,
|
||||
0x01, 0x5c, 0x01, 0xb6, 0x02, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0xa2, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x02, 0x00, 0x07, 0x00, 0x10, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x2f,
|
||||
0x00, 0x17, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x12, 0x00, 0x46, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0d, 0x00, 0x58, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x06, 0x00, 0x12, 0x00, 0x65, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09, 0x00, 0x01, 0x00, 0x20,
|
||||
0x00, 0x77, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x97, 0x00, 0x03,
|
||||
0x00, 0x01, 0x04, 0x09, 0x00, 0x03, 0x00, 0x5e, 0x00, 0xa5, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09,
|
||||
0x00, 0x04, 0x00, 0x24, 0x01, 0x03, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09, 0x00, 0x05, 0x00, 0x1a,
|
||||
0x01, 0x27, 0x00, 0x03, 0x00, 0x01, 0x04, 0x09, 0x00, 0x06, 0x00, 0x24, 0x01, 0x41, 0x00, 0x03,
|
||||
0x00, 0x01, 0x04, 0x09, 0x00, 0x11, 0x00, 0x02, 0x01, 0x65, 0x59, 0x75, 0x7a, 0x75, 0x4f, 0x53,
|
||||
0x53, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x67, 0x75, 0x6c, 0x61,
|
||||
0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x31, 0x2e, 0x30, 0x30, 0x30, 0x3b, 0x3b,
|
||||
0x59, 0x75, 0x7a, 0x75, 0x4f, 0x53, 0x53, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x2d, 0x52, 0x3b, 0x32, 0x30, 0x31, 0x39, 0x3b, 0x46, 0x4c, 0x56, 0x49, 0x2d, 0x36, 0x31, 0x34,
|
||||
0x59, 0x75, 0x7a, 0x75, 0x4f, 0x53, 0x53, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x20, 0x52, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x31, 0x2e, 0x30, 0x30, 0x30, 0x59,
|
||||
0x75, 0x7a, 0x75, 0x4f, 0x53, 0x53, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2d,
|
||||
0x52, 0x00, 0x59, 0x00, 0x75, 0x00, 0x7a, 0x00, 0x75, 0x00, 0x4f, 0x00, 0x53, 0x00, 0x53, 0x00,
|
||||
0x45, 0x00, 0x78, 0x00, 0x74, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00,
|
||||
0x6e, 0x00, 0x52, 0x00, 0x65, 0x00, 0x67, 0x00, 0x75, 0x00, 0x6c, 0x00, 0x61, 0x00, 0x72, 0x00,
|
||||
0x56, 0x00, 0x65, 0x00, 0x72, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x20, 0x00,
|
||||
0x31, 0x00, 0x2e, 0x00, 0x30, 0x00, 0x30, 0x00, 0x30, 0x00, 0x3b, 0x00, 0x3b, 0x00, 0x59, 0x00,
|
||||
0x75, 0x00, 0x7a, 0x00, 0x75, 0x00, 0x4f, 0x00, 0x53, 0x00, 0x53, 0x00, 0x45, 0x00, 0x78, 0x00,
|
||||
0x74, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x2d, 0x00,
|
||||
0x52, 0x00, 0x3b, 0x00, 0x32, 0x00, 0x30, 0x00, 0x31, 0x00, 0x39, 0x00, 0x3b, 0x00, 0x46, 0x00,
|
||||
0x4c, 0x00, 0x56, 0x00, 0x49, 0x00, 0x2d, 0x00, 0x36, 0x00, 0x31, 0x00, 0x34, 0x00, 0x59, 0x00,
|
||||
0x75, 0x00, 0x7a, 0x00, 0x75, 0x00, 0x4f, 0x00, 0x53, 0x00, 0x53, 0x00, 0x45, 0x00, 0x78, 0x00,
|
||||
0x74, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x20, 0x00,
|
||||
0x52, 0x00, 0x56, 0x00, 0x65, 0x00, 0x72, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00,
|
||||
0x20, 0x00, 0x31, 0x00, 0x2e, 0x00, 0x30, 0x00, 0x30, 0x00, 0x30, 0x00, 0x59, 0x00, 0x75, 0x00,
|
||||
0x7a, 0x00, 0x75, 0x00, 0x4f, 0x00, 0x53, 0x00, 0x53, 0x00, 0x45, 0x00, 0x78, 0x00, 0x74, 0x00,
|
||||
0x65, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x2d, 0x00, 0x52, 0x00,
|
||||
0x52, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x9c, 0x00, 0x32,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x01, 0x02, 0x01, 0x03, 0x00, 0x03, 0x01, 0x04,
|
||||
0x01, 0x05, 0x01, 0x06, 0x01, 0x07, 0x01, 0x08, 0x01, 0x09, 0x01, 0x0a, 0x01, 0x0b, 0x01, 0x0c,
|
||||
0x01, 0x0d, 0x07, 0x75, 0x6e, 0x69, 0x30, 0x30, 0x30, 0x30, 0x07, 0x75, 0x6e, 0x69, 0x30, 0x30,
|
||||
0x30, 0x44, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x30, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
|
||||
0x45, 0x31, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x32, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
|
||||
0x45, 0x33, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x34, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
|
||||
0x45, 0x35, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x36, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
|
||||
0x45, 0x37, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30, 0x45, 0x38, 0x07, 0x75, 0x6e, 0x69, 0x45, 0x30,
|
||||
0x45, 0x39, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0xff, 0xff, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x01,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
}};
|
||||
|
||||
} // namespace FileSys::SystemArchive::SharedFontData
|
||||
@@ -0,0 +1,13 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
namespace FileSys::SystemArchive::SharedFontData {
|
||||
|
||||
extern const std::array<unsigned char, 2932> FONT_NINTENDO_EXTENDED;
|
||||
|
||||
} // namespace FileSys::SystemArchive::SharedFontData
|
||||
13592
src/core/file_sys/system_archive/data/font_standard.cpp
Normal file
File diff suppressed because it is too large
13
src/core/file_sys/system_archive/data/font_standard.h
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
|
||||
namespace FileSys::SystemArchive::SharedFontData {
|
||||
|
||||
extern const std::array<unsigned char, 217276> FONT_STANDARD;
|
||||
|
||||
} // namespace FileSys::SystemArchive::SharedFontData
|
||||
78
src/core/file_sys/system_archive/shared_font.cpp
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/file_sys/system_archive/data/font_chinese_simplified.h"
|
||||
#include "core/file_sys/system_archive/data/font_chinese_traditional.h"
|
||||
#include "core/file_sys/system_archive/data/font_extended_chinese_simplified.h"
|
||||
#include "core/file_sys/system_archive/data/font_korean.h"
|
||||
#include "core/file_sys/system_archive/data/font_nintendo_extended.h"
|
||||
#include "core/file_sys/system_archive/data/font_standard.h"
|
||||
#include "core/file_sys/system_archive/shared_font.h"
|
||||
#include "core/file_sys/vfs_vector.h"
|
||||
#include "core/hle/service/ns/pl_u.h"
|
||||
|
||||
namespace FileSys::SystemArchive {
|
||||
|
||||
namespace {
|
||||
|
||||
template <std::size_t Size>
|
||||
VirtualFile PackBFTTF(const std::array<u8, Size>& data, const std::string& name) {
|
||||
std::vector<u32> vec(Size / sizeof(u32));
|
||||
std::memcpy(vec.data(), data.data(), vec.size() * sizeof(u32));
|
||||
|
||||
std::vector<u8> bfttf(Size + sizeof(u64));
|
||||
|
||||
u64 offset = 0;
|
||||
Service::NS::EncryptSharedFont(vec, bfttf, offset);
|
||||
return std::make_shared<VectorVfsFile>(std::move(bfttf), name);
|
||||
}
|
||||
|
||||
} // Anonymous namespace
|
||||
|
||||
VirtualDir FontNintendoExtension() {
|
||||
return std::make_shared<VectorVfsDirectory>(
|
||||
std::vector<VirtualFile>{
|
||||
PackBFTTF(SharedFontData::FONT_NINTENDO_EXTENDED, "nintendo_ext_003.bfttf"),
|
||||
PackBFTTF(SharedFontData::FONT_NINTENDO_EXTENDED, "nintendo_ext2_003.bfttf"),
|
||||
},
|
||||
std::vector<VirtualDir>{});
|
||||
}
|
||||
|
||||
VirtualDir FontStandard() {
|
||||
return std::make_shared<VectorVfsDirectory>(
|
||||
std::vector<VirtualFile>{
|
||||
PackBFTTF(SharedFontData::FONT_STANDARD, "nintendo_udsg-r_std_003.bfttf"),
|
||||
},
|
||||
std::vector<VirtualDir>{});
|
||||
}
|
||||
|
||||
VirtualDir FontKorean() {
|
||||
return std::make_shared<VectorVfsDirectory>(
|
||||
std::vector<VirtualFile>{
|
||||
PackBFTTF(SharedFontData::FONT_KOREAN, "nintendo_udsg-r_ko_003.bfttf"),
|
||||
},
|
||||
std::vector<VirtualDir>{});
|
||||
}
|
||||
|
||||
VirtualDir FontChineseTraditional() {
|
||||
return std::make_shared<VectorVfsDirectory>(
|
||||
std::vector<VirtualFile>{
|
||||
PackBFTTF(SharedFontData::FONT_CHINESE_TRADITIONAL,
|
||||
"nintendo_udjxh-db_zh-tw_003.bfttf"),
|
||||
},
|
||||
std::vector<VirtualDir>{});
|
||||
}

VirtualDir FontChineseSimple() {
return std::make_shared<VectorVfsDirectory>(
std::vector<VirtualFile>{
PackBFTTF(SharedFontData::FONT_CHINESE_SIMPLIFIED,
"nintendo_udsg-r_org_zh-cn_003.bfttf"),
PackBFTTF(SharedFontData::FONT_EXTENDED_CHINESE_SIMPLIFIED,
"nintendo_udsg-r_ext_zh-cn_003.bfttf"),
},
std::vector<VirtualDir>{});
}

} // namespace FileSys::SystemArchive
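PackBFTTF above copies each font array into a u32 buffer, lets Service::NS::EncryptSharedFont produce the BFTTF image, and wraps the result in an in-memory VFS file. A standalone sketch of just the byte-to-word packing step; it rounds the word count up, whereas the hunk's plain `Size / sizeof(u32)` is equivalent here because every font array in this change is a multiple of four bytes:

```cpp
#include <array>
#include <cstdint>
#include <cstring>
#include <vector>

// Copy a fixed-size byte array into 32-bit words, keeping any tail bytes by
// rounding the word count up. Sketch only; not the yuzu helper itself.
template <std::size_t Size>
std::vector<std::uint32_t> PackBytesToWords(const std::array<std::uint8_t, Size>& data) {
    std::vector<std::uint32_t> words((Size + sizeof(std::uint32_t) - 1) / sizeof(std::uint32_t));
    std::memcpy(words.data(), data.data(), Size);
    return words;
}

// Usage sketch:
// std::array<std::uint8_t, 8> raw{0x00, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x80};
// const auto words = PackBytesToWords(raw);   // two u32 words, byte-for-byte copy
```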
17
src/core/file_sys/system_archive/shared_font.h
Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2019 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "core/file_sys/vfs_types.h"
|
||||
|
||||
namespace FileSys::SystemArchive {
|
||||
|
||||
VirtualDir FontNintendoExtension();
|
||||
VirtualDir FontStandard();
|
||||
VirtualDir FontKorean();
|
||||
VirtualDir FontChineseTraditional();
|
||||
VirtualDir FontChineseSimple();
|
||||
|
||||
} // namespace FileSys::SystemArchive
|
||||
@@ -6,6 +6,7 @@
|
||||
#include "core/file_sys/romfs.h"
|
||||
#include "core/file_sys/system_archive/mii_model.h"
|
||||
#include "core/file_sys/system_archive/ng_word.h"
|
||||
#include "core/file_sys/system_archive/shared_font.h"
|
||||
#include "core/file_sys/system_archive/system_archive.h"
|
||||
#include "core/file_sys/system_archive/system_version.h"
|
||||
|
||||
@@ -39,11 +40,11 @@ constexpr std::array<SystemArchiveDescriptor, SYSTEM_ARCHIVE_COUNT> SYSTEM_ARCHI
{0x010000000000080D, "UrlBlackList", nullptr},
{0x010000000000080E, "TimeZoneBinary", nullptr},
{0x010000000000080F, "CertStoreCruiser", nullptr},
{0x0100000000000810, "FontNintendoExtension", nullptr},
{0x0100000000000811, "FontStandard", nullptr},
{0x0100000000000812, "FontKorean", nullptr},
{0x0100000000000813, "FontChineseTraditional", nullptr},
{0x0100000000000814, "FontChineseSimple", nullptr},
{0x0100000000000810, "FontNintendoExtension", &FontNintendoExtension},
{0x0100000000000811, "FontStandard", &FontStandard},
{0x0100000000000812, "FontKorean", &FontKorean},
{0x0100000000000813, "FontChineseTraditional", &FontChineseTraditional},
{0x0100000000000814, "FontChineseSimple", &FontChineseSimple},
{0x0100000000000815, "FontBfcpx", nullptr},
{0x0100000000000816, "SystemUpdate", nullptr},
{0x0100000000000817, "0100000000000817", nullptr},

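The descriptor table now points the five shared-font entries at supplier functions instead of nullptr. A hedged sketch of how such a table can be searched by title ID to synthesize an archive on demand; SystemArchiveData, Descriptor, and SynthesizeSystemArchive are invented stand-ins, since the real lookup in system_archive.cpp is not part of this hunk:

```cpp
#include <cstdint>
#include <optional>
#include <string_view>

struct SystemArchiveData {};  // placeholder for the synthesized RomFS image

using ArchiveSupplier = SystemArchiveData (*)();

struct Descriptor {
    std::uint64_t title_id;
    std::string_view name;
    ArchiveSupplier supplier;  // nullptr means "not implemented yet"
};

SystemArchiveData FontStandardStub() { return {}; }

constexpr Descriptor kDescriptors[]{
    {0x0100000000000810, "FontNintendoExtension", nullptr},
    {0x0100000000000811, "FontStandard", &FontStandardStub},
};

std::optional<SystemArchiveData> SynthesizeSystemArchive(std::uint64_t title_id) {
    for (const auto& descriptor : kDescriptors) {
        if (descriptor.title_id == title_id && descriptor.supplier != nullptr) {
            return descriptor.supplier();  // build the archive on demand
        }
    }
    return std::nullopt;  // unknown title or no supplier wired up
}
```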
@@ -202,13 +202,11 @@ void RegisterModule(std::string name, VAddr beg, VAddr end, bool add_elf_ext) {
|
||||
}
|
||||
|
||||
static Kernel::Thread* FindThreadById(s64 id) {
|
||||
for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
|
||||
const auto& threads = Core::System::GetInstance().Scheduler(core).GetThreadList();
|
||||
for (auto& thread : threads) {
|
||||
if (thread->GetThreadID() == static_cast<u64>(id)) {
|
||||
current_core = core;
|
||||
return thread.get();
|
||||
}
|
||||
const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList();
|
||||
for (auto& thread : threads) {
|
||||
if (thread->GetThreadID() == static_cast<u64>(id)) {
|
||||
current_core = thread->GetProcessorID();
|
||||
return thread.get();
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
@@ -647,11 +645,9 @@ static void HandleQuery() {
|
||||
SendReply(buffer.c_str());
|
||||
} else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) {
|
||||
std::string val = "m";
|
||||
for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
|
||||
const auto& threads = Core::System::GetInstance().Scheduler(core).GetThreadList();
|
||||
for (const auto& thread : threads) {
|
||||
val += fmt::format("{:x},", thread->GetThreadID());
|
||||
}
|
||||
const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList();
|
||||
for (const auto& thread : threads) {
|
||||
val += fmt::format("{:x},", thread->GetThreadID());
|
||||
}
|
||||
val.pop_back();
|
||||
SendReply(val.c_str());
|
||||
@@ -661,13 +657,11 @@ static void HandleQuery() {
std::string buffer;
buffer += "l<?xml version=\"1.0\"?>";
buffer += "<threads>";
for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
const auto& threads = Core::System::GetInstance().Scheduler(core).GetThreadList();
for (const auto& thread : threads) {
buffer +=
fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*",
thread->GetThreadID(), core, thread->GetThreadID());
}
const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList();
for (const auto& thread : threads) {
buffer +=
fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*",
thread->GetThreadID(), thread->GetProcessorID(), thread->GetThreadID());
}
buffer += "</threads>";
SendReply(buffer.c_str());

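With one global thread list, the `core` attribute in the qXfer:threads reply has to come from each thread's own processor ID rather than from the scheduler index being iterated. A small sketch of the same formatting against a stand-in thread record, using fmt, which the handler already relies on:

```cpp
#include <cstdint>
#include <string>
#include <vector>
#include <fmt/format.h>

// Stand-in for Kernel::Thread: just the two fields the reply needs.
struct ThreadInfoStub {
    std::uint64_t thread_id;
    int processor_id;
};

// Build the qXfer:threads XML body the way the updated handler does: the core
// attribute comes from each thread, not from an outer per-scheduler loop.
std::string BuildThreadListXml(const std::vector<ThreadInfoStub>& threads) {
    std::string buffer = "l<?xml version=\"1.0\"?><threads>";
    for (const auto& thread : threads) {
        buffer += fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*",
                              thread.thread_id, thread.processor_id, thread.thread_id);
    }
    buffer += "</threads>";
    return buffer;
}
```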
@@ -22,6 +22,7 @@ namespace Kernel {
|
||||
namespace {
|
||||
// Wake up num_to_wake (or all) threads in a vector.
|
||||
void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
|
||||
auto& system = Core::System::GetInstance();
|
||||
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
|
||||
// them all.
|
||||
std::size_t last = waiting_threads.size();
|
||||
@@ -35,6 +36,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
|
||||
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
|
||||
waiting_threads[i]->SetArbiterWaitAddress(0);
|
||||
waiting_threads[i]->ResumeFromWait();
|
||||
system.PrepareReschedule(waiting_threads[i]->GetProcessorID());
|
||||
}
|
||||
}
|
||||
} // Anonymous namespace
|
||||
@@ -89,12 +91,20 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a

// Determine the modified value depending on the waiting count.
s32 updated_value;
if (waiting_threads.empty()) {
updated_value = value + 1;
} else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
updated_value = value - 1;
if (num_to_wake <= 0) {
if (waiting_threads.empty()) {
updated_value = value + 1;
} else {
updated_value = value - 1;
}
} else {
updated_value = value;
if (waiting_threads.empty()) {
updated_value = value + 1;
} else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
updated_value = value - 1;
} else {
updated_value = value;
}
}

if (static_cast<s32>(Memory::Read32(address)) != value) {
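As far as the lines shown here indicate, the restructured updated_value computation is behaviour-preserving: splitting out the `num_to_wake <= 0` ("wake all") path does not change the result for any combination of inputs. A self-contained sketch that restates both forms as pure functions (names invented for the comparison) and cross-checks them:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Old form: single if/else-if chain, as removed by the hunk above.
std::int32_t UpdatedValueOld(std::int32_t value, std::size_t waiting, std::int32_t num_to_wake) {
    if (waiting == 0) return value + 1;
    if (num_to_wake <= 0 || waiting <= static_cast<std::uint32_t>(num_to_wake)) return value - 1;
    return value;
}

// New form: "wake all" handled separately, as added by the hunk above.
std::int32_t UpdatedValueNew(std::int32_t value, std::size_t waiting, std::int32_t num_to_wake) {
    if (num_to_wake <= 0) {
        return waiting == 0 ? value + 1 : value - 1;
    }
    if (waiting == 0) return value + 1;
    if (waiting <= static_cast<std::uint32_t>(num_to_wake)) return value - 1;
    return value;
}

int main() {
    // The two versions agree for every waiter-count / wake-count combination
    // exercised here; only the control flow was reorganized.
    for (std::size_t waiting = 0; waiting < 8; ++waiting) {
        for (std::int32_t num_to_wake = -2; num_to_wake < 8; ++num_to_wake) {
            assert(UpdatedValueOld(100, waiting, num_to_wake) ==
                   UpdatedValueNew(100, waiting, num_to_wake));
        }
    }
    return 0;
}
```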
@@ -169,30 +179,22 @@ ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) {
|
||||
|
||||
current_thread->WakeAfterDelay(timeout);
|
||||
|
||||
system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
|
||||
system.PrepareReschedule(current_thread->GetProcessorID());
|
||||
return RESULT_TIMEOUT;
|
||||
}
|
||||
|
||||
std::vector<SharedPtr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) const {
|
||||
const auto RetrieveWaitingThreads = [this](std::size_t core_index,
|
||||
std::vector<SharedPtr<Thread>>& waiting_threads,
|
||||
VAddr arb_addr) {
|
||||
const auto& scheduler = system.Scheduler(core_index);
|
||||
const auto& thread_list = scheduler.GetThreadList();
|
||||
|
||||
for (const auto& thread : thread_list) {
|
||||
if (thread->GetArbiterWaitAddress() == arb_addr) {
|
||||
waiting_threads.push_back(thread);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Retrieve all threads that are waiting for this address.
|
||||
std::vector<SharedPtr<Thread>> threads;
|
||||
RetrieveWaitingThreads(0, threads, address);
|
||||
RetrieveWaitingThreads(1, threads, address);
|
||||
RetrieveWaitingThreads(2, threads, address);
|
||||
RetrieveWaitingThreads(3, threads, address);
|
||||
const auto& scheduler = system.GlobalScheduler();
|
||||
const auto& thread_list = scheduler.GetThreadList();
|
||||
|
||||
for (const auto& thread : thread_list) {
|
||||
if (thread->GetArbiterWaitAddress() == address) {
|
||||
threads.push_back(thread);
|
||||
}
|
||||
}
|
||||
|
||||
// Sort them by priority, such that the highest priority ones come first.
|
||||
std::sort(threads.begin(), threads.end(),
|
||||
|
||||
@@ -12,12 +12,15 @@
|
||||
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/core_timing_util.h"
|
||||
#include "core/hle/kernel/address_arbiter.h"
|
||||
#include "core/hle/kernel/client_port.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/resource_limit.h"
|
||||
#include "core/hle/kernel/scheduler.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/lock.h"
|
||||
#include "core/hle/result.h"
|
||||
@@ -58,12 +61,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
|
||||
if (thread->HasWakeupCallback()) {
|
||||
resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
|
||||
}
|
||||
}
|
||||
|
||||
if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
|
||||
thread->GetWaitHandle() != 0) {
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
|
||||
thread->GetStatus() == ThreadStatus::WaitCondVar);
|
||||
} else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
|
||||
thread->GetStatus() == ThreadStatus::WaitCondVar) {
|
||||
thread->SetMutexWaitAddress(0);
|
||||
thread->SetCondVarWaitAddress(0);
|
||||
thread->SetWaitHandle(0);
|
||||
@@ -83,18 +82,23 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
|
||||
}
|
||||
|
||||
if (resume) {
|
||||
if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
|
||||
thread->GetStatus() == ThreadStatus::WaitArb) {
|
||||
thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
|
||||
}
|
||||
thread->ResumeFromWait();
|
||||
}
|
||||
}
|
||||
|
||||
struct KernelCore::Impl {
|
||||
explicit Impl(Core::System& system) : system{system} {}
|
||||
explicit Impl(Core::System& system) : system{system}, global_scheduler{system} {}
|
||||
|
||||
void Initialize(KernelCore& kernel) {
|
||||
Shutdown();
|
||||
|
||||
InitializeSystemResourceLimit(kernel);
|
||||
InitializeThreads();
|
||||
InitializePreemption();
|
||||
}
|
||||
|
||||
void Shutdown() {
|
||||
@@ -110,6 +114,9 @@ struct KernelCore::Impl {
|
||||
|
||||
thread_wakeup_callback_handle_table.Clear();
|
||||
thread_wakeup_event_type = nullptr;
|
||||
preemption_event = nullptr;
|
||||
|
||||
global_scheduler.Shutdown();
|
||||
|
||||
named_ports.clear();
|
||||
}
|
||||
@@ -132,6 +139,18 @@ struct KernelCore::Impl {
|
||||
system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
|
||||
}
|
||||
|
||||
void InitializePreemption() {
|
||||
preemption_event = system.CoreTiming().RegisterEvent(
|
||||
"PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
|
||||
global_scheduler.PreemptThreads();
|
||||
s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
|
||||
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
|
||||
});
|
||||
|
||||
s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
|
||||
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
|
||||
}
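The preemption hook above registers a timed event whose handler re-schedules itself, so PreemptThreads runs every 10 ms for as long as the kernel is alive. Below is a minimal, self-contained sketch of that re-arming pattern; it is plain C++ with made-up tick values and names, not yuzu's CoreTiming API.

// Illustration of a self-rescheduling timed event: the callback pushes itself
// back onto the event queue, giving a fixed-period tick without a dedicated timer.
#include <cstdio>
#include <functional>
#include <queue>
#include <vector>

struct Event {
    long long when; // absolute tick at which the callback fires
    std::function<void(long long)> callback;
    bool operator>(const Event& other) const { return when > other.when; }
};

int main() {
    std::priority_queue<Event, std::vector<Event>, std::greater<Event>> events;
    long long now = 0;
    const long long interval = 10; // stand-in for msToCycles(10ms)

    // The callback re-arms itself, mirroring how the preemption event above
    // schedules itself again from inside its own handler.
    std::function<void(long long)> preempt = [&](long long current) {
        std::printf("preempt at tick %lld\n", current);
        events.push({current + interval, preempt});
    };
    events.push({interval, preempt});

    while (now < 50 && !events.empty()) {
        const Event ev = events.top();
        events.pop();
        now = ev.when;
        ev.callback(now);
    }
}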
std::atomic<u32> next_object_id{0};
|
||||
std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
|
||||
std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
|
||||
@@ -140,10 +159,12 @@ struct KernelCore::Impl {
|
||||
// Lists all processes that exist in the current session.
|
||||
std::vector<SharedPtr<Process>> process_list;
|
||||
Process* current_process = nullptr;
|
||||
Kernel::GlobalScheduler global_scheduler;
|
||||
|
||||
SharedPtr<ResourceLimit> system_resource_limit;
|
||||
|
||||
Core::Timing::EventType* thread_wakeup_event_type = nullptr;
|
||||
Core::Timing::EventType* preemption_event = nullptr;
|
||||
// TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
|
||||
// allowing us to simply use a pool index or similar.
|
||||
Kernel::HandleTable thread_wakeup_callback_handle_table;
|
||||
@@ -203,6 +224,14 @@ const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const {
|
||||
return impl->process_list;
|
||||
}
|
||||
|
||||
Kernel::GlobalScheduler& KernelCore::GlobalScheduler() {
|
||||
return impl->global_scheduler;
|
||||
}
|
||||
|
||||
const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
|
||||
return impl->global_scheduler;
|
||||
}
|
||||
|
||||
void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
|
||||
impl->named_ports.emplace(std::move(name), std::move(port));
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ namespace Kernel {
|
||||
|
||||
class AddressArbiter;
|
||||
class ClientPort;
|
||||
class GlobalScheduler;
|
||||
class HandleTable;
|
||||
class Process;
|
||||
class ResourceLimit;
|
||||
@@ -75,6 +76,12 @@ public:
|
||||
/// Retrieves the list of processes.
|
||||
const std::vector<SharedPtr<Process>>& GetProcessList() const;
|
||||
|
||||
/// Gets the sole instance of the global scheduler
|
||||
Kernel::GlobalScheduler& GlobalScheduler();
|
||||
|
||||
/// Gets the sole instance of the global scheduler
|
||||
const Kernel::GlobalScheduler& GlobalScheduler() const;
|
||||
|
||||
/// Adds a port to the named port table
|
||||
void AddNamedPort(std::string name, SharedPtr<ClientPort> port);
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_cpu.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
@@ -78,7 +79,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
|
||||
// thread.
|
||||
ASSERT(requesting_thread == current_thread);
|
||||
|
||||
const u32 addr_value = Memory::Read32(address);
|
||||
u32 addr_value = Memory::Read32(address);
|
||||
|
||||
// If the mutex isn't being held, just return success.
|
||||
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
|
||||
@@ -89,6 +90,20 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
|
||||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
// This is a workaround for an unknown bug that writes the mutex value to give ownership to a
// cond var waiting thread.
if (holding_thread->GetStatus() == ThreadStatus::WaitCondVar) {
|
||||
if (holding_thread->GetMutexWaitAddress() == address) {
|
||||
Release(address, holding_thread.get());
|
||||
addr_value = Memory::Read32(address);
|
||||
if (addr_value == 0)
|
||||
return RESULT_SUCCESS;
|
||||
else {
|
||||
holding_thread = handle_table.Get<Thread>(addr_value & Mutex::MutexOwnerMask);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wait until the mutex is released
|
||||
current_thread->SetMutexWaitAddress(address);
|
||||
current_thread->SetWaitHandle(requesting_thread_handle);
|
||||
@@ -104,14 +119,13 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
|
||||
return RESULT_SUCCESS;
|
||||
}
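The checks above treat the 32-bit mutex word as an owner handle with a "has waiters" bit OR'd in (see the `holding_thread_handle | Mutex::MutexHasWaitersFlag` comparison and the `addr_value & Mutex::MutexOwnerMask` lookup). The sketch below shows that encoding in isolation; the concrete constant values are assumptions for illustration, the real ones live in Mutex's declaration.

// Standalone sketch of the mutex word layout used by TryAcquire/Release above.
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t MutexHasWaitersFlag = 0x40000000; // assumed value
constexpr std::uint32_t MutexOwnerMask = ~MutexHasWaitersFlag;

int main() {
    const std::uint32_t owner_handle = 0x0000ABCD; // hypothetical thread handle
    const std::uint32_t mutex_word = owner_handle | MutexHasWaitersFlag;

    // Recover the owner and the waiters bit, mirroring the
    // `addr_value & Mutex::MutexOwnerMask` reads in the code above.
    std::printf("owner=0x%08X waiters=%d\n",
                static_cast<unsigned>(mutex_word & MutexOwnerMask),
                (mutex_word & MutexHasWaitersFlag) != 0);
}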
ResultCode Mutex::Release(VAddr address) {
|
||||
ResultCode Mutex::Release(VAddr address, Thread* holding_thread) {
|
||||
// The mutex address must be 4-byte aligned
|
||||
if ((address % sizeof(u32)) != 0) {
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
|
||||
auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address);
|
||||
auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(holding_thread, address);
|
||||
|
||||
// There are no more threads waiting for the mutex, release it completely.
|
||||
if (thread == nullptr) {
|
||||
@@ -120,7 +134,7 @@ ResultCode Mutex::Release(VAddr address) {
|
||||
}
|
||||
|
||||
// Transfer the ownership of the mutex from the previous owner to the new one.
|
||||
TransferMutexOwnership(address, current_thread, thread);
|
||||
TransferMutexOwnership(address, holding_thread, thread);
|
||||
|
||||
u32 mutex_value = thread->GetWaitHandle();
|
||||
|
||||
@@ -139,6 +153,12 @@ ResultCode Mutex::Release(VAddr address) {
|
||||
thread->SetCondVarWaitAddress(0);
|
||||
thread->SetMutexWaitAddress(0);
|
||||
thread->SetWaitHandle(0);
|
||||
thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
|
||||
|
||||
if (thread->GetProcessorID() >= 0)
|
||||
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
|
||||
if (holding_thread->GetProcessorID() >= 0)
|
||||
system.CpuCore(holding_thread->GetProcessorID()).PrepareReschedule();
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ public:
|
||||
Handle requesting_thread_handle);
|
||||
|
||||
/// Releases the mutex at the specified address.
|
||||
ResultCode Release(VAddr address);
|
||||
ResultCode Release(VAddr address, Thread* holding_thread);
|
||||
|
||||
private:
|
||||
Core::System& system;
|
||||
|
||||
@@ -213,10 +213,7 @@ void Process::PrepareForTermination() {
|
||||
}
|
||||
};
|
||||
|
||||
stop_threads(system.Scheduler(0).GetThreadList());
|
||||
stop_threads(system.Scheduler(1).GetThreadList());
|
||||
stop_threads(system.Scheduler(2).GetThreadList());
|
||||
stop_threads(system.Scheduler(3).GetThreadList());
|
||||
stop_threads(system.GlobalScheduler().GetThreadList());
|
||||
|
||||
FreeTLSRegion(tls_region_address);
|
||||
tls_region_address = 0;
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
//
|
||||
// SelectThreads, Yield functions originally by TuxSH.
|
||||
// licensed under GPLv2 or later under exception provided by the author.
|
||||
|
||||
#include <algorithm>
|
||||
#include <set>
|
||||
#include <unordered_set>
|
||||
#include <utility>
|
||||
|
||||
#include "common/assert.h"
|
||||
@@ -17,56 +22,405 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
std::mutex Scheduler::scheduler_mutex;
|
||||
GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {
|
||||
is_reselection_pending = false;
|
||||
}
|
||||
|
||||
Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core)
|
||||
: cpu_core{cpu_core}, system{system} {}
|
||||
void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
|
||||
thread_list.push_back(std::move(thread));
|
||||
}
|
||||
|
||||
Scheduler::~Scheduler() {
|
||||
for (auto& thread : thread_list) {
|
||||
thread->Stop();
|
||||
void GlobalScheduler::RemoveThread(const Thread* thread) {
|
||||
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
|
||||
thread_list.end());
|
||||
}
|
||||
|
||||
/*
 * UnloadThread selects a core and forces it to unload its current thread's context
 */
void GlobalScheduler::UnloadThread(s32 core) {
|
||||
Scheduler& sched = system.Scheduler(core);
|
||||
sched.UnloadThread();
|
||||
}
|
||||
|
||||
/*
 * SelectThread takes care of selecting the new scheduled thread.
 * It does this in 3 steps:
 * - First, a thread is selected from the top of the priority queue. If no thread
 * is obtained then we move to step two, else we are done.
 * - Second, we try to get a suggested thread that's not assigned to any core or
 * that is not the top thread in that core.
 * - Third, if no suggested thread is found, we do a second pass and pick a running
 * thread in another core and swap it with its current thread.
 */
void GlobalScheduler::SelectThread(u32 core) {
|
||||
const auto update_thread = [](Thread* thread, Scheduler& sched) {
|
||||
if (thread != sched.selected_thread) {
|
||||
if (thread == nullptr) {
|
||||
++sched.idle_selection_count;
|
||||
}
|
||||
sched.selected_thread = thread;
|
||||
}
|
||||
sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
|
||||
std::atomic_thread_fence(std::memory_order_seq_cst);
|
||||
};
|
||||
Scheduler& sched = system.Scheduler(core);
|
||||
Thread* current_thread = nullptr;
|
||||
// Step 1: Get top thread in schedule queue.
|
||||
current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
|
||||
if (current_thread) {
|
||||
update_thread(current_thread, sched);
|
||||
return;
|
||||
}
|
||||
// Step 2: Try selecting a suggested thread.
|
||||
Thread* winner = nullptr;
|
||||
std::set<s32> sug_cores;
|
||||
for (auto thread : suggested_queue[core]) {
|
||||
s32 this_core = thread->GetProcessorID();
|
||||
Thread* thread_on_core = nullptr;
|
||||
if (this_core >= 0) {
|
||||
thread_on_core = scheduled_queue[this_core].front();
|
||||
}
|
||||
if (this_core < 0 || thread != thread_on_core) {
|
||||
winner = thread;
|
||||
break;
|
||||
}
|
||||
sug_cores.insert(this_core);
|
||||
}
|
||||
// If we got a suggested thread, select it; otherwise do a second pass.
if (winner && winner->GetPriority() > 2) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core, winner);
|
||||
update_thread(winner, sched);
|
||||
return;
|
||||
}
|
||||
// Step 3: Select a suggested thread from another core
|
||||
for (auto& src_core : sug_cores) {
|
||||
auto it = scheduled_queue[src_core].begin();
|
||||
it++;
|
||||
if (it != scheduled_queue[src_core].end()) {
|
||||
Thread* thread_on_core = scheduled_queue[src_core].front();
|
||||
Thread* to_change = *it;
|
||||
if (thread_on_core->IsRunning() || to_change->IsRunning()) {
|
||||
UnloadThread(src_core);
|
||||
}
|
||||
TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
|
||||
current_thread = thread_on_core;
|
||||
break;
|
||||
}
|
||||
}
|
||||
update_thread(current_thread, sched);
|
||||
}
|
||||
|
||||
/*
 * YieldThread takes a thread and moves it to the back of its priority list.
 * This operation can be redundant; if so, no scheduling is changed and it is marked as such.
 */
bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
|
||||
// Note: caller should use critical section, etc.
|
||||
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
|
||||
const u32 priority = yielding_thread->GetPriority();
|
||||
|
||||
// Yield the thread
|
||||
ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
|
||||
"Thread yielding without being in front");
|
||||
scheduled_queue[core_id].yield(priority);
|
||||
|
||||
Thread* winner = scheduled_queue[core_id].front(priority);
|
||||
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
|
||||
}
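The `scheduled_queue[core_id].yield(priority)` call above rotates one priority level of the multilevel queue: the front thread of that level goes to the back, and the new front is compared against the yielder. A small self-contained model of that rotation, with a std::deque per priority level standing in for Common::MultiLevelQueue:

// Per-priority rotation sketch; not the real MultiLevelQueue, just the same idea.
#include <array>
#include <cstdio>
#include <deque>
#include <string>

int main() {
    constexpr int kPriorities = 64;
    std::array<std::deque<std::string>, kPriorities> level;
    level[32] = {"ThreadA", "ThreadB", "ThreadC"};

    const int priority = 32;
    // Equivalent of scheduled_queue[core].yield(priority): move the front thread
    // to the back of its own priority level.
    level[priority].push_back(level[priority].front());
    level[priority].pop_front();

    // The new front is the candidate "winner" compared against the yielder.
    std::printf("winner after yield: %s\n", level[priority].front().c_str());
}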
/*
 * YieldThreadAndBalanceLoad takes a thread and moves it to the back of its priority list.
 * Afterwards, it tries to pick a suggested thread from the suggested queue that has a worse time
 * or a better priority than the next thread in the core.
 * This operation can be redundant; if so, no scheduling is changed and it is marked as such.
 */
bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
|
||||
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
|
||||
// etc.
|
||||
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
|
||||
const u32 priority = yielding_thread->GetPriority();
|
||||
|
||||
// Yield the thread
|
||||
ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
|
||||
"Thread yielding without being in front");
|
||||
scheduled_queue[core_id].yield(priority);
|
||||
|
||||
std::array<Thread*, NUM_CPU_CORES> current_threads;
|
||||
for (u32 i = 0; i < NUM_CPU_CORES; i++) {
|
||||
current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
|
||||
}
|
||||
|
||||
Thread* next_thread = scheduled_queue[core_id].front(priority);
|
||||
Thread* winner = nullptr;
|
||||
for (auto& thread : suggested_queue[core_id]) {
|
||||
const s32 source_core = thread->GetProcessorID();
|
||||
if (source_core >= 0) {
|
||||
if (current_threads[source_core] != nullptr) {
|
||||
if (thread == current_threads[source_core] ||
|
||||
current_threads[source_core]->GetPriority() < min_regular_priority) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
|
||||
next_thread->GetPriority() < thread->GetPriority()) {
|
||||
if (thread->GetPriority() <= priority) {
|
||||
winner = thread;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (winner != nullptr) {
|
||||
if (winner != yielding_thread) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core_id, winner);
|
||||
}
|
||||
} else {
|
||||
winner = next_thread;
|
||||
}
|
||||
|
||||
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
|
||||
}
|
||||
|
||||
/*
 * YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue
 * and into the suggested queue. If no thread can be scheduled on that core afterwards,
 * a suggested thread is obtained instead.
 * This operation can be redundant; if so, no scheduling is changed and it is marked as such.
 */
bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
|
||||
// Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
|
||||
// etc.
|
||||
Thread* winner = nullptr;
|
||||
const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
|
||||
|
||||
// Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
|
||||
TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
|
||||
|
||||
// If the core is idle, perform load balancing, excluding the threads that have just used this
|
||||
// function...
|
||||
if (scheduled_queue[core_id].empty()) {
|
||||
// Here, "current_threads" is calculated after the ""yield"", unlike yield -1
|
||||
std::array<Thread*, NUM_CPU_CORES> current_threads;
|
||||
for (u32 i = 0; i < NUM_CPU_CORES; i++) {
|
||||
current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
|
||||
}
|
||||
for (auto& thread : suggested_queue[core_id]) {
|
||||
const s32 source_core = thread->GetProcessorID();
|
||||
if (source_core < 0 || thread == current_threads[source_core]) {
|
||||
continue;
|
||||
}
|
||||
if (current_threads[source_core] == nullptr ||
|
||||
current_threads[source_core]->GetPriority() >= min_regular_priority) {
|
||||
winner = thread;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (winner != nullptr) {
|
||||
if (winner != yielding_thread) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core_id, winner);
|
||||
}
|
||||
} else {
|
||||
winner = yielding_thread;
|
||||
}
|
||||
}
|
||||
|
||||
return AskForReselectionOrMarkRedundant(yielding_thread, winner);
|
||||
}
|
||||
|
||||
void GlobalScheduler::PreemptThreads() {
|
||||
for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
|
||||
const u32 priority = preemption_priorities[core_id];
|
||||
|
||||
if (scheduled_queue[core_id].size(priority) > 0) {
|
||||
scheduled_queue[core_id].front(priority)->IncrementYieldCount();
|
||||
scheduled_queue[core_id].yield(priority);
|
||||
if (scheduled_queue[core_id].size(priority) > 1) {
|
||||
scheduled_queue[core_id].front(priority)->IncrementYieldCount();
|
||||
}
|
||||
}
|
||||
|
||||
Thread* current_thread =
|
||||
scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
|
||||
Thread* winner = nullptr;
|
||||
for (auto& thread : suggested_queue[core_id]) {
|
||||
const s32 source_core = thread->GetProcessorID();
|
||||
if (thread->GetPriority() != priority) {
|
||||
continue;
|
||||
}
|
||||
if (source_core >= 0) {
|
||||
Thread* next_thread = scheduled_queue[source_core].empty()
|
||||
? nullptr
|
||||
: scheduled_queue[source_core].front();
|
||||
if (next_thread != nullptr && next_thread->GetPriority() < 2) {
|
||||
break;
|
||||
}
|
||||
if (next_thread == thread) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (current_thread != nullptr &&
|
||||
current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
|
||||
winner = thread;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (winner != nullptr) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core_id, winner);
|
||||
current_thread =
|
||||
winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
|
||||
}
|
||||
|
||||
if (current_thread != nullptr && current_thread->GetPriority() > priority) {
|
||||
for (auto& thread : suggested_queue[core_id]) {
|
||||
const s32 source_core = thread->GetProcessorID();
|
||||
if (thread->GetPriority() < priority) {
|
||||
continue;
|
||||
}
|
||||
if (source_core >= 0) {
|
||||
Thread* next_thread = scheduled_queue[source_core].empty()
|
||||
? nullptr
|
||||
: scheduled_queue[source_core].front();
|
||||
if (next_thread != nullptr && next_thread->GetPriority() < 2) {
|
||||
break;
|
||||
}
|
||||
if (next_thread == thread) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (current_thread != nullptr &&
|
||||
current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
|
||||
winner = thread;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (winner != nullptr) {
|
||||
if (winner->IsRunning()) {
|
||||
UnloadThread(winner->GetProcessorID());
|
||||
}
|
||||
TransferToCore(winner->GetPriority(), core_id, winner);
|
||||
current_thread = winner;
|
||||
}
|
||||
}
|
||||
|
||||
is_reselection_pending.store(true, std::memory_order_release);
|
||||
}
|
||||
}
|
||||
|
||||
void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) {
|
||||
suggested_queue[core].add(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) {
|
||||
suggested_queue[core].remove(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
|
||||
ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
|
||||
scheduled_queue[core].add(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
|
||||
ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
|
||||
scheduled_queue[core].add(thread, priority, false);
|
||||
}
|
||||
|
||||
void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) {
|
||||
scheduled_queue[core].remove(thread, priority);
|
||||
scheduled_queue[core].add(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) {
|
||||
scheduled_queue[core].remove(thread, priority);
|
||||
}
|
||||
|
||||
void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
|
||||
const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
|
||||
const s32 source_core = thread->GetProcessorID();
|
||||
if (source_core == destination_core || !schedulable) {
|
||||
return;
|
||||
}
|
||||
thread->SetProcessorID(destination_core);
|
||||
if (source_core >= 0) {
|
||||
Unschedule(priority, source_core, thread);
|
||||
}
|
||||
if (destination_core >= 0) {
|
||||
Unsuggest(priority, destination_core, thread);
|
||||
Schedule(priority, destination_core, thread);
|
||||
}
|
||||
if (source_core >= 0) {
|
||||
Suggest(priority, source_core, thread);
|
||||
}
|
||||
}
|
||||
|
||||
bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
|
||||
if (current_thread == winner) {
|
||||
current_thread->IncrementYieldCount();
|
||||
return true;
|
||||
} else {
|
||||
is_reselection_pending.store(true, std::memory_order_release);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
void GlobalScheduler::Shutdown() {
|
||||
for (std::size_t core = 0; core < NUM_CPU_CORES; core++) {
|
||||
scheduled_queue[core].clear();
|
||||
suggested_queue[core].clear();
|
||||
}
|
||||
thread_list.clear();
|
||||
}
|
||||
|
||||
GlobalScheduler::~GlobalScheduler() = default;
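From here on the per-core Scheduler is only a thin front-end: the GlobalScheduler owns the queues and picks threads, and each core's scheduler asks it for a selection keyed by its core_id. A rough, self-contained model of that split (names and structure are simplified for illustration and are not yuzu's actual types):

// One global owner of per-core queues, plus per-core front-ends that delegate.
#include <array>
#include <cstdio>
#include <deque>

struct Global {
    static constexpr int kCores = 4;
    std::array<std::deque<int>, kCores> scheduled; // thread ids per core

    int SelectThread(int core) const {
        return scheduled[core].empty() ? -1 : scheduled[core].front();
    }
};

struct PerCore {
    Global& global;
    int core_id;
    int selected = -1;

    // Mirrors Scheduler::SelectThreads() delegating to the global scheduler.
    void SelectThreads() { selected = global.SelectThread(core_id); }
};

int main() {
    Global global;
    global.scheduled[0] = {7, 9};
    PerCore core0{global, 0};
    core0.SelectThreads();
    std::printf("core 0 selected thread %d\n", core0.selected);
}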
Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id)
|
||||
: system(system), cpu_core(cpu_core), core_id(core_id) {}
|
||||
|
||||
Scheduler::~Scheduler() = default;
|
||||
|
||||
bool Scheduler::HaveReadyThreads() const {
|
||||
std::lock_guard lock{scheduler_mutex};
|
||||
return !ready_queue.empty();
|
||||
return system.GlobalScheduler().HaveReadyThreads(core_id);
|
||||
}
|
||||
|
||||
Thread* Scheduler::GetCurrentThread() const {
|
||||
return current_thread.get();
|
||||
}
|
||||
|
||||
Thread* Scheduler::GetSelectedThread() const {
|
||||
return selected_thread.get();
|
||||
}
|
||||
|
||||
void Scheduler::SelectThreads() {
|
||||
system.GlobalScheduler().SelectThread(core_id);
|
||||
}
|
||||
|
||||
u64 Scheduler::GetLastContextSwitchTicks() const {
|
||||
return last_context_switch_time;
|
||||
}
|
||||
|
||||
Thread* Scheduler::PopNextReadyThread() {
|
||||
Thread* next = nullptr;
|
||||
Thread* thread = GetCurrentThread();
|
||||
|
||||
if (thread && thread->GetStatus() == ThreadStatus::Running) {
|
||||
if (ready_queue.empty()) {
|
||||
return thread;
|
||||
}
|
||||
// We have to do better than the current thread.
|
||||
// This call returns null when that's not possible.
|
||||
next = ready_queue.front();
|
||||
if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
|
||||
next = thread;
|
||||
}
|
||||
} else {
|
||||
if (ready_queue.empty()) {
|
||||
return nullptr;
|
||||
}
|
||||
next = ready_queue.front();
|
||||
void Scheduler::TryDoContextSwitch() {
|
||||
if (is_context_switch_pending) {
|
||||
SwitchContext();
|
||||
}
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
void Scheduler::SwitchContext(Thread* new_thread) {
|
||||
Thread* previous_thread = GetCurrentThread();
|
||||
void Scheduler::UnloadThread() {
|
||||
Thread* const previous_thread = GetCurrentThread();
|
||||
Process* const previous_process = system.Kernel().CurrentProcess();
|
||||
|
||||
UpdateLastContextSwitchTime(previous_thread, previous_process);
|
||||
@@ -80,23 +434,52 @@ void Scheduler::SwitchContext(Thread* new_thread) {
|
||||
if (previous_thread->GetStatus() == ThreadStatus::Running) {
|
||||
// This is only the case when a reschedule is triggered without the current thread
|
||||
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
|
||||
ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
|
||||
previous_thread->SetStatus(ThreadStatus::Ready);
|
||||
}
|
||||
previous_thread->SetIsRunning(false);
|
||||
}
|
||||
current_thread = nullptr;
|
||||
}
|
||||
|
||||
void Scheduler::SwitchContext() {
|
||||
Thread* const previous_thread = GetCurrentThread();
|
||||
Thread* const new_thread = GetSelectedThread();
|
||||
|
||||
is_context_switch_pending = false;
|
||||
if (new_thread == previous_thread) {
|
||||
return;
|
||||
}
|
||||
|
||||
Process* const previous_process = system.Kernel().CurrentProcess();
|
||||
|
||||
UpdateLastContextSwitchTime(previous_thread, previous_process);
|
||||
|
||||
// Save context for previous thread
|
||||
if (previous_thread) {
|
||||
cpu_core.SaveContext(previous_thread->GetContext());
|
||||
// Save the TPIDR_EL0 system register in case it was modified.
|
||||
previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
|
||||
|
||||
if (previous_thread->GetStatus() == ThreadStatus::Running) {
|
||||
// This is only the case when a reschedule is triggered without the current thread
|
||||
// yielding execution (i.e. an event triggered, system core time-sliced, etc)
|
||||
previous_thread->SetStatus(ThreadStatus::Ready);
|
||||
}
|
||||
previous_thread->SetIsRunning(false);
|
||||
}
|
||||
|
||||
// Load context of new thread
|
||||
if (new_thread) {
|
||||
ASSERT_MSG(new_thread->GetProcessorID() == this->core_id,
|
||||
"Thread must be assigned to this core.");
|
||||
ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
|
||||
"Thread must be ready to become running.");
|
||||
|
||||
// Cancel any outstanding wakeup events for this thread
|
||||
new_thread->CancelWakeupTimer();
|
||||
|
||||
current_thread = new_thread;
|
||||
|
||||
ready_queue.remove(new_thread, new_thread->GetPriority());
|
||||
new_thread->SetStatus(ThreadStatus::Running);
|
||||
new_thread->SetIsRunning(true);
|
||||
|
||||
auto* const thread_owner_process = current_thread->GetOwnerProcess();
|
||||
if (previous_process != thread_owner_process) {
|
||||
@@ -130,124 +513,9 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
|
||||
last_context_switch_time = most_recent_switch_ticks;
|
||||
}
|
||||
|
||||
void Scheduler::Reschedule() {
|
||||
std::lock_guard lock{scheduler_mutex};
|
||||
|
||||
Thread* cur = GetCurrentThread();
|
||||
Thread* next = PopNextReadyThread();
|
||||
|
||||
if (cur && next) {
|
||||
LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
|
||||
} else if (cur) {
|
||||
LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
|
||||
} else if (next) {
|
||||
LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
|
||||
}
|
||||
|
||||
SwitchContext(next);
|
||||
}
|
||||
|
||||
void Scheduler::AddThread(SharedPtr<Thread> thread) {
|
||||
std::lock_guard lock{scheduler_mutex};
|
||||
|
||||
thread_list.push_back(std::move(thread));
|
||||
}
|
||||
|
||||
void Scheduler::RemoveThread(Thread* thread) {
|
||||
std::lock_guard lock{scheduler_mutex};
|
||||
|
||||
thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
|
||||
thread_list.end());
|
||||
}
|
||||
|
||||
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
|
||||
std::lock_guard lock{scheduler_mutex};
|
||||
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
|
||||
ready_queue.add(thread, priority);
|
||||
}
|
||||
|
||||
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
|
||||
std::lock_guard lock{scheduler_mutex};
|
||||
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::Ready);
|
||||
ready_queue.remove(thread, priority);
|
||||
}
|
||||
|
||||
void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
|
||||
std::lock_guard lock{scheduler_mutex};
|
||||
if (thread->GetPriority() == priority) {
|
||||
return;
|
||||
}
|
||||
|
||||
// If thread was ready, adjust queues
|
||||
if (thread->GetStatus() == ThreadStatus::Ready)
|
||||
ready_queue.adjust(thread, thread->GetPriority(), priority);
|
||||
}
|
||||
|
||||
Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
|
||||
std::lock_guard lock{scheduler_mutex};
|
||||
|
||||
const u32 mask = 1U << core;
|
||||
for (auto* thread : ready_queue) {
|
||||
if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
|
||||
return thread;
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
|
||||
ASSERT(thread != nullptr);
|
||||
// Avoid yielding if the thread isn't even running.
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::Running);
|
||||
|
||||
// Sanity check that the priority is valid
|
||||
ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
|
||||
|
||||
// Yield this thread -- sleep for zero time and force reschedule to different thread
|
||||
GetCurrentThread()->Sleep(0);
|
||||
}
|
||||
|
||||
void Scheduler::YieldWithLoadBalancing(Thread* thread) {
|
||||
ASSERT(thread != nullptr);
|
||||
const auto priority = thread->GetPriority();
|
||||
const auto core = static_cast<u32>(thread->GetProcessorID());
|
||||
|
||||
// Avoid yielding if the thread isn't even running.
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::Running);
|
||||
|
||||
// Sanity check that the priority is valid
|
||||
ASSERT(priority < THREADPRIO_COUNT);
|
||||
|
||||
// Sleep for zero time to be able to force reschedule to different thread
|
||||
GetCurrentThread()->Sleep(0);
|
||||
|
||||
Thread* suggested_thread = nullptr;
|
||||
|
||||
// Search through all of the cpu cores (except this one) for a suggested thread.
|
||||
// Take the first non-nullptr one
|
||||
for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
|
||||
const auto res =
|
||||
system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority);
|
||||
|
||||
// If scheduler provides a suggested thread
|
||||
if (res != nullptr) {
|
||||
// And its better than the current suggested thread (or is the first valid one)
|
||||
if (suggested_thread == nullptr ||
|
||||
suggested_thread->GetPriority() > res->GetPriority()) {
|
||||
suggested_thread = res;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If a suggested thread was found, queue that for this core
|
||||
if (suggested_thread != nullptr)
|
||||
suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
|
||||
}
|
||||
|
||||
void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
|
||||
UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
|
||||
void Scheduler::Shutdown() {
|
||||
current_thread = nullptr;
|
||||
selected_thread = nullptr;
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -20,124 +20,172 @@ namespace Kernel {
|
||||
|
||||
class Process;
|
||||
|
||||
class Scheduler final {
|
||||
class GlobalScheduler final {
|
||||
public:
|
||||
explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core);
|
||||
~Scheduler();
|
||||
|
||||
/// Returns whether there are any threads that are ready to run.
|
||||
bool HaveReadyThreads() const;
|
||||
|
||||
/// Reschedules to the next available thread (call after current thread is suspended)
|
||||
void Reschedule();
|
||||
|
||||
/// Gets the current running thread
|
||||
Thread* GetCurrentThread() const;
|
||||
|
||||
/// Gets the timestamp for the last context switch in ticks.
|
||||
u64 GetLastContextSwitchTicks() const;
|
||||
static constexpr u32 NUM_CPU_CORES = 4;
|
||||
|
||||
explicit GlobalScheduler(Core::System& system);
|
||||
~GlobalScheduler();
|
||||
/// Adds a new thread to the scheduler
|
||||
void AddThread(SharedPtr<Thread> thread);
|
||||
|
||||
/// Removes a thread from the scheduler
|
||||
void RemoveThread(Thread* thread);
|
||||
|
||||
/// Schedules a thread that has become "ready"
|
||||
void ScheduleThread(Thread* thread, u32 priority);
|
||||
|
||||
/// Unschedules a thread that was already scheduled
|
||||
void UnscheduleThread(Thread* thread, u32 priority);
|
||||
|
||||
/// Sets the priority of a thread in the scheduler
|
||||
void SetThreadPriority(Thread* thread, u32 priority);
|
||||
|
||||
/// Gets the next suggested thread for load balancing
|
||||
Thread* GetNextSuggestedThread(u32 core, u32 minimum_priority) const;
|
||||
|
||||
/**
|
||||
* YieldWithoutLoadBalancing -- analogous to normal yield on a system
|
||||
* Moves the thread to the end of the ready queue for its priority, and then reschedules the
|
||||
* system to the new head of the queue.
|
||||
*
|
||||
* Example (Single Core -- but can be extrapolated to multi):
|
||||
* ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->)
|
||||
* Currently Running: ThreadR
|
||||
*
|
||||
* ThreadR calls YieldWithoutLoadBalancing
|
||||
*
|
||||
* ThreadR is moved to the end of ready_queue[prio=0]:
|
||||
* ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->)
|
||||
* Currently Running: Nothing
|
||||
*
|
||||
* System is rescheduled (ThreadA is popped off of queue):
|
||||
* ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->)
|
||||
* Currently Running: ThreadA
|
||||
*
|
||||
* If the queue is empty at time of call, no yielding occurs. This does not cross between cores
|
||||
* or priorities at all.
|
||||
*/
|
||||
void YieldWithoutLoadBalancing(Thread* thread);
|
||||
|
||||
/**
|
||||
* YieldWithLoadBalancing -- yield but with better selection of the new running thread
|
||||
* Moves the current thread to the end of the ready queue for its priority, then selects a
|
||||
* 'suggested thread' (a thread on a different core that could run on this core) from the
|
||||
* scheduler, changes its core, and reschedules the current core to that thread.
|
||||
*
|
||||
* Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were
|
||||
* single core):
|
||||
* ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant
|
||||
* ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
|
||||
* Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
|
||||
*
|
||||
* ThreadQ calls YieldWithLoadBalancing
|
||||
*
|
||||
* ThreadQ is moved to the end of ready_queue[core=0][prio=0]:
|
||||
* ready_queue[core=0][prio=0]: ThreadA, ThreadB
|
||||
* ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
|
||||
* Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
|
||||
*
|
||||
* A list of suggested threads for each core is compiled
|
||||
* Suggested Threads: {ThreadC on Core 1}
|
||||
* If this were quad core (as the switch is), there could be between 0 and 3 threads in this
|
||||
* list. If there are more than one, the thread is selected by highest prio.
|
||||
*
|
||||
* ThreadC is core changed to Core 0:
|
||||
* ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ
|
||||
* ready_queue[core=1][prio=0]: ThreadD
|
||||
* Currently Running: None on Core 0 || ThreadP on Core 1
|
||||
*
|
||||
* System is rescheduled (ThreadC is popped off of queue):
|
||||
* ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ
|
||||
* ready_queue[core=1][prio=0]: ThreadD
|
||||
* Currently Running: ThreadC on Core 0 || ThreadP on Core 1
|
||||
*
|
||||
* If no suggested threads can be found this will behave just as normal yield. If there are
|
||||
* multiple candidates for the suggested thread on a core, the highest prio is taken.
|
||||
*/
|
||||
void YieldWithLoadBalancing(Thread* thread);
|
||||
|
||||
/// Currently unknown -- asserts as unimplemented on call
|
||||
void YieldAndWaitForLoadBalancing(Thread* thread);
|
||||
void RemoveThread(const Thread* thread);
|
||||
|
||||
/// Returns a list of all threads managed by the scheduler
|
||||
const std::vector<SharedPtr<Thread>>& GetThreadList() const {
|
||||
return thread_list;
|
||||
}
|
||||
|
||||
private:
|
||||
/**
|
||||
* Pops and returns the next thread from the thread queue
|
||||
* @return A pointer to the next ready thread
|
||||
*/
|
||||
Thread* PopNextReadyThread();
|
||||
// Add a thread to the suggested queue of a cpu core. Suggested threads may be
|
||||
// picked if no thread is scheduled to run on the core.
|
||||
void Suggest(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Remove a thread from the suggested queue of a cpu core. Suggested threads may be
// picked if no thread is scheduled to run on the core.
void Unsuggest(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Add a thread to the scheduling queue of a cpu core. The thread is added at the
// back of the queue in its priority level.
void Schedule(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Add a thread to the scheduling queue of a cpu core. The thread is added at the
// front of the queue in its priority level.
void SchedulePrepend(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Reschedule an already scheduled thread based on a new priority
|
||||
void Reschedule(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Unschedule a thread.
|
||||
void Unschedule(u32 priority, u32 core, Thread* thread);
|
||||
|
||||
// Transfers a thread into a specific core. If the destination_core is -1
// it will be unscheduled from its source core and added to that core's suggested
// queue.
void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
|
||||
|
||||
/*
 * UnloadThread selects a core and forces it to unload its current thread's context
 */
void UnloadThread(s32 core);
|
||||
|
||||
/*
 * SelectThread takes care of selecting the new scheduled thread.
 * It does this in 3 steps:
 * - First, a thread is selected from the top of the priority queue. If no thread
 * is obtained then we move to step two, else we are done.
 * - Second, we try to get a suggested thread that's not assigned to any core or
 * that is not the top thread in that core.
 * - Third, if no suggested thread is found, we do a second pass and pick a running
 * thread in another core and swap it with its current thread.
 */
void SelectThread(u32 core);
|
||||
|
||||
bool HaveReadyThreads(u32 core_id) const {
|
||||
return !scheduled_queue[core_id].empty();
|
||||
}
|
||||
|
||||
/*
 * YieldThread takes a thread and moves it to the back of its priority list.
 * This operation can be redundant; if so, no scheduling is changed and it is marked as such.
 */
bool YieldThread(Thread* thread);
|
||||
|
||||
/*
 * YieldThreadAndBalanceLoad takes a thread and moves it to the back of its priority list.
 * Afterwards, it tries to pick a suggested thread from the suggested queue that has a worse time
 * or a better priority than the next thread in the core.
 * This operation can be redundant; if so, no scheduling is changed and it is marked as such.
 */
bool YieldThreadAndBalanceLoad(Thread* thread);
|
||||
|
||||
/*
 * YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue
 * and into the suggested queue. If no thread can be scheduled on that core afterwards,
 * a suggested thread is obtained instead.
 * This operation can be redundant; if so, no scheduling is changed and it is marked as such.
 */
bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
|
||||
|
||||
/*
 * PreemptThreads rotates the scheduling queues of threads at a preemption priority and then
 * does some core rebalancing. Preemption priorities can be found in the array
 * 'preemption_priorities'. This operation happens every 10ms.
 */
void PreemptThreads();
|
||||
|
||||
u32 CpuCoresCount() const {
|
||||
return NUM_CPU_CORES;
|
||||
}
|
||||
|
||||
void SetReselectionPending() {
|
||||
is_reselection_pending.store(true, std::memory_order_release);
|
||||
}
|
||||
|
||||
bool IsReselectionPending() const {
|
||||
return is_reselection_pending.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
void Shutdown();
|
||||
|
||||
private:
|
||||
bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
|
||||
|
||||
static constexpr u32 min_regular_priority = 2;
|
||||
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
|
||||
std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
|
||||
std::atomic<bool> is_reselection_pending;
|
||||
|
||||
// `preemption_priorities` are the priority levels at which the global scheduler
|
||||
// preempts threads every 10 ms. They are ordered from Core 0 to Core 3
|
||||
std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
|
||||
|
||||
/// Lists all thread ids that aren't deleted/etc.
|
||||
std::vector<SharedPtr<Thread>> thread_list;
|
||||
Core::System& system;
|
||||
};
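GlobalScheduler publishes "a reselection is needed" with a release store and readers check it with an acquire load, so anything written before the flag is set is visible to the core that observes it. A tiny standalone illustration of that flag pattern, mirroring the SetReselectionPending/IsReselectionPending pair above:

// Release/acquire flag sketch; standalone, not the emulator's actual control flow.
#include <atomic>
#include <cstdio>

std::atomic<bool> is_reselection_pending{false};

void SetReselectionPending() {
    // Publish: everything written before this store is visible to an acquiring reader.
    is_reselection_pending.store(true, std::memory_order_release);
}

bool IsReselectionPending() {
    // Consume: pairs with the release store above.
    return is_reselection_pending.load(std::memory_order_acquire);
}

int main() {
    std::printf("pending? %d\n", IsReselectionPending());
    SetReselectionPending();
    std::printf("pending? %d\n", IsReselectionPending());
}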
class Scheduler final {
|
||||
public:
|
||||
explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id);
|
||||
~Scheduler();
|
||||
|
||||
/// Returns whether there are any threads that are ready to run.
|
||||
bool HaveReadyThreads() const;
|
||||
|
||||
/// Reschedules to the next available thread (call after current thread is suspended)
|
||||
void TryDoContextSwitch();
|
||||
|
||||
/// Unloads currently running thread
|
||||
void UnloadThread();
|
||||
|
||||
/// Selects the thread at the top of this core's scheduling multilevel queue.
void SelectThreads();
|
||||
|
||||
/// Gets the current running thread
|
||||
Thread* GetCurrentThread() const;
|
||||
|
||||
/// Gets the currently selected thread from the top of the multilevel queue
|
||||
Thread* GetSelectedThread() const;
|
||||
|
||||
/// Gets the timestamp for the last context switch in ticks.
|
||||
u64 GetLastContextSwitchTicks() const;
|
||||
|
||||
bool ContextSwitchPending() const {
|
||||
return is_context_switch_pending;
|
||||
}
|
||||
|
||||
/// Shuts down the scheduler.
void Shutdown();
|
||||
|
||||
private:
|
||||
friend class GlobalScheduler;
|
||||
/**
|
||||
* Switches the CPU's active thread context to that of the specified thread
|
||||
* @param new_thread The thread to switch to
|
||||
*/
|
||||
void SwitchContext(Thread* new_thread);
|
||||
void SwitchContext();
|
||||
|
||||
/**
|
||||
* Called on every context switch to update the internal timestamp
|
||||
@@ -152,19 +200,16 @@ private:
|
||||
*/
|
||||
void UpdateLastContextSwitchTime(Thread* thread, Process* process);
|
||||
|
||||
/// Lists all thread ids that aren't deleted/etc.
|
||||
std::vector<SharedPtr<Thread>> thread_list;
|
||||
|
||||
/// Lists only ready thread ids.
|
||||
Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
|
||||
|
||||
SharedPtr<Thread> current_thread = nullptr;
|
||||
|
||||
Core::ARM_Interface& cpu_core;
|
||||
u64 last_context_switch_time = 0;
|
||||
SharedPtr<Thread> selected_thread = nullptr;
|
||||
|
||||
Core::System& system;
|
||||
static std::mutex scheduler_mutex;
|
||||
Core::ARM_Interface& cpu_core;
|
||||
u64 last_context_switch_time = 0;
|
||||
u64 idle_selection_count = 0;
|
||||
const u32 core_id;
|
||||
|
||||
bool is_context_switch_pending = false;
|
||||
};
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -516,7 +516,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
|
||||
thread->WakeAfterDelay(nano_seconds);
|
||||
thread->SetWakeupCallback(DefaultThreadWakeupCallback);
|
||||
|
||||
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
|
||||
return RESULT_TIMEOUT;
|
||||
}
|
||||
@@ -534,6 +534,7 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand
|
||||
}
|
||||
|
||||
thread->CancelWait();
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -577,7 +578,8 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
|
||||
}
|
||||
|
||||
auto* const current_process = system.Kernel().CurrentProcess();
|
||||
return current_process->GetMutex().Release(mutex_addr);
|
||||
return current_process->GetMutex().Release(mutex_addr,
|
||||
system.CurrentScheduler().GetCurrentThread());
|
||||
}
|
||||
|
||||
enum class BreakType : u32 {
|
||||
@@ -1066,6 +1068,8 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
|
||||
}
|
||||
|
||||
thread->SetActivity(static_cast<ThreadActivity>(activity));
|
||||
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -1147,7 +1151,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
|
||||
|
||||
thread->SetPriority(priority);
|
||||
|
||||
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -1503,7 +1507,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
|
||||
thread->SetName(
|
||||
fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle));
|
||||
|
||||
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
@@ -1525,7 +1529,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
|
||||
thread->ResumeFromWait();
|
||||
|
||||
if (thread->GetStatus() == ThreadStatus::Ready) {
|
||||
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
}
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
@@ -1537,7 +1541,7 @@ static void ExitThread(Core::System& system) {
|
||||
|
||||
auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
|
||||
current_thread->Stop();
|
||||
system.CurrentScheduler().RemoveThread(current_thread);
|
||||
system.GlobalScheduler().RemoveThread(current_thread);
|
||||
system.PrepareReschedule();
|
||||
}
|
||||
|
||||
@@ -1553,17 +1557,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
|
||||
|
||||
auto& scheduler = system.CurrentScheduler();
|
||||
auto* const current_thread = scheduler.GetCurrentThread();
|
||||
bool is_redundant = false;
|
||||
|
||||
if (nanoseconds <= 0) {
|
||||
switch (static_cast<SleepType>(nanoseconds)) {
|
||||
case SleepType::YieldWithoutLoadBalancing:
|
||||
scheduler.YieldWithoutLoadBalancing(current_thread);
|
||||
is_redundant = current_thread->YieldSimple();
|
||||
break;
|
||||
case SleepType::YieldWithLoadBalancing:
|
||||
scheduler.YieldWithLoadBalancing(current_thread);
|
||||
is_redundant = current_thread->YieldAndBalanceLoad();
|
||||
break;
|
||||
case SleepType::YieldAndWaitForLoadBalancing:
|
||||
scheduler.YieldAndWaitForLoadBalancing(current_thread);
|
||||
is_redundant = current_thread->YieldAndWaitForLoadBalancing();
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
|
||||
@@ -1572,10 +1577,13 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
|
||||
current_thread->Sleep(nanoseconds);
|
||||
}
|
||||
|
||||
// Reschedule all CPU cores
|
||||
for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) {
|
||||
system.CpuCore(i).PrepareReschedule();
|
||||
if (is_redundant) {
|
||||
// If the yield is redundant, the core is pretty much idle. Some games keep a core
// idling while doing nothing; we advance timing to avoid costly continuous calls.
system.CoreTiming().AddTicks(2000);
|
||||
}
|
||||
system.PrepareReschedule(current_thread->GetProcessorID());
|
||||
}
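SleepThread above reuses a non-positive nanosecond argument as a yield selector via the SleepType cast. The sketch below shows that dispatch in isolation; the sentinel values (0, -1, -2) are assumptions for illustration, the real mapping is whatever the SleepType enum in svc.cpp defines.

// Standalone sketch of the "negative nanoseconds select a yield type" convention.
#include <cstdint>
#include <cstdio>

enum class SleepType : std::int64_t {
    YieldWithoutLoadBalancing = 0,     // assumed sentinel
    YieldWithLoadBalancing = -1,       // assumed sentinel
    YieldAndWaitForLoadBalancing = -2, // assumed sentinel
};

void Sleep(std::int64_t nanoseconds) {
    if (nanoseconds > 0) {
        std::printf("sleep for %lld ns\n", static_cast<long long>(nanoseconds));
        return;
    }
    switch (static_cast<SleepType>(nanoseconds)) {
    case SleepType::YieldWithoutLoadBalancing:
        std::printf("simple yield\n");
        break;
    case SleepType::YieldWithLoadBalancing:
        std::printf("yield and balance load\n");
        break;
    case SleepType::YieldAndWaitForLoadBalancing:
        std::printf("yield and wait for load balancing\n");
        break;
    }
}

int main() {
    Sleep(1000);
    Sleep(-1);
}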
/// Wait process wide key atomic
|
||||
@@ -1601,17 +1609,21 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
|
||||
|
||||
auto* const current_process = system.Kernel().CurrentProcess();
|
||||
const auto& handle_table = current_process->GetHandleTable();
|
||||
SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle);
|
||||
ASSERT(thread);
|
||||
|
||||
const auto release_result = current_process->GetMutex().Release(mutex_addr);
|
||||
SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread();
|
||||
|
||||
const auto release_result =
|
||||
current_process->GetMutex().Release(mutex_addr, current_thread.get());
|
||||
if (release_result.IsError()) {
|
||||
return release_result;
|
||||
}
|
||||
|
||||
SharedPtr<Thread> current_thread = system.CurrentScheduler().GetCurrentThread();
|
||||
current_thread->SetCondVarWaitAddress(condition_variable_addr);
|
||||
current_thread->SetMutexWaitAddress(mutex_addr);
|
||||
current_thread->SetWaitHandle(thread_handle);
|
||||
@@ -1622,7 +1634,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
|
||||
|
||||
// Note: Deliberately don't attempt to inherit the lock owner's priority.
|
||||
|
||||
system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
|
||||
system.PrepareReschedule(current_thread->GetProcessorID());
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -1632,24 +1644,19 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
|
||||
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
|
||||
condition_variable_addr, target);
|
||||
|
||||
const auto RetrieveWaitingThreads = [&system](std::size_t core_index,
|
||||
std::vector<SharedPtr<Thread>>& waiting_threads,
|
||||
VAddr condvar_addr) {
|
||||
const auto& scheduler = system.Scheduler(core_index);
|
||||
const auto& thread_list = scheduler.GetThreadList();
|
||||
|
||||
for (const auto& thread : thread_list) {
|
||||
if (thread->GetCondVarWaitAddress() == condvar_addr)
|
||||
waiting_threads.push_back(thread);
|
||||
}
|
||||
};
|
||||
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
|
||||
|
||||
// Retrieve a list of all threads that are waiting for this condition variable.
|
||||
std::vector<SharedPtr<Thread>> waiting_threads;
|
||||
RetrieveWaitingThreads(0, waiting_threads, condition_variable_addr);
|
||||
RetrieveWaitingThreads(1, waiting_threads, condition_variable_addr);
|
||||
RetrieveWaitingThreads(2, waiting_threads, condition_variable_addr);
|
||||
RetrieveWaitingThreads(3, waiting_threads, condition_variable_addr);
|
||||
const auto& scheduler = system.GlobalScheduler();
|
||||
const auto& thread_list = scheduler.GetThreadList();
|
||||
|
||||
for (const auto& thread : thread_list) {
|
||||
if (thread->GetCondVarWaitAddress() == condition_variable_addr) {
|
||||
waiting_threads.push_back(thread);
|
||||
}
|
||||
}
|
||||
|
||||
// Sort them by priority, such that the highest priority ones come first.
|
||||
std::sort(waiting_threads.begin(), waiting_threads.end(),
|
||||
[](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
|
||||
@@ -1679,18 +1686,20 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
|
||||
|
||||
// Atomically read the value of the mutex.
|
||||
u32 mutex_val = 0;
|
||||
u32 update_val = 0;
|
||||
const VAddr mutex_address = thread->GetMutexWaitAddress();
|
||||
do {
|
||||
monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
|
||||
monitor.SetExclusive(current_core, mutex_address);
|
||||
|
||||
// If the mutex is not yet acquired, acquire it.
|
||||
mutex_val = Memory::Read32(thread->GetMutexWaitAddress());
|
||||
mutex_val = Memory::Read32(mutex_address);
|
||||
|
||||
if (mutex_val != 0) {
|
||||
monitor.ClearExclusive();
|
||||
break;
|
||||
update_val = mutex_val | Mutex::MutexHasWaitersFlag;
|
||||
} else {
|
||||
update_val = thread->GetWaitHandle();
|
||||
}
|
||||
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
|
||||
thread->GetWaitHandle()));
|
||||
} while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
|
||||
if (mutex_val == 0) {
|
||||
// We were able to acquire the mutex, resume this thread.
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
|
||||
@@ -1704,20 +1713,9 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
|
||||
thread->SetLockOwner(nullptr);
|
||||
thread->SetMutexWaitAddress(0);
|
||||
thread->SetWaitHandle(0);
|
||||
system.CpuCore(thread->GetProcessorID()).PrepareReschedule();
|
||||
thread->SetWaitSynchronizationResult(RESULT_SUCCESS);
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
} else {
|
||||
// Atomically signal that the mutex now has a waiting thread.
|
||||
do {
|
||||
monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
|
||||
|
||||
// Ensure that the mutex value is still what we expect.
|
||||
u32 value = Memory::Read32(thread->GetMutexWaitAddress());
|
||||
// TODO(Subv): When this happens, the kernel just clears the exclusive state and
|
||||
// retries the initial read for this thread.
|
||||
ASSERT_MSG(mutex_val == value, "Unhandled synchronization primitive case");
|
||||
} while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
|
||||
mutex_val | Mutex::MutexHasWaitersFlag));
|
||||
|
||||
// The mutex is already owned by some other thread, make this thread wait on it.
|
||||
const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
|
||||
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
|
||||
@@ -1728,6 +1726,7 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
|
||||
thread->SetStatus(ThreadStatus::WaitMutex);
|
||||
|
||||
owner->AddMutexWaiter(thread);
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1754,7 +1753,12 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
|
||||
|
||||
const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
|
||||
auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
|
||||
return address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
|
||||
const ResultCode result =
|
||||
address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
|
||||
if (result == RESULT_SUCCESS) {
|
||||
system.PrepareReschedule();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Signals to an address (via Address Arbiter)
|
||||
@@ -2040,7 +2044,10 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
|
||||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
thread->ChangeCore(core, affinity_mask);
|
||||
system.PrepareReschedule(thread->GetProcessorID());
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -2151,6 +2158,7 @@ static ResultCode SignalEvent(Core::System& system, Handle handle) {
|
||||
}
|
||||
|
||||
writable_event->Signal();
|
||||
system.PrepareReschedule();
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
@@ -45,15 +45,7 @@ void Thread::Stop() {
|
||||
callback_handle);
|
||||
kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle);
|
||||
callback_handle = 0;
|
||||
|
||||
// Clean up thread from ready queue
|
||||
// This is only needed when the thread is terminated forcefully (SVC TerminateProcess)
|
||||
if (status == ThreadStatus::Ready || status == ThreadStatus::Paused) {
|
||||
scheduler->UnscheduleThread(this, current_priority);
|
||||
}
|
||||
|
||||
status = ThreadStatus::Dead;
|
||||
|
||||
SetStatus(ThreadStatus::Dead);
|
||||
WakeupAllWaitingThreads();
|
||||
|
||||
// Clean up any dangling references in objects that this thread was waiting for
|
||||
@@ -132,17 +124,16 @@ void Thread::ResumeFromWait() {
|
||||
wakeup_callback = nullptr;
|
||||
|
||||
if (activity == ThreadActivity::Paused) {
|
||||
status = ThreadStatus::Paused;
|
||||
SetStatus(ThreadStatus::Paused);
|
||||
return;
|
||||
}
|
||||
|
||||
status = ThreadStatus::Ready;
|
||||
|
||||
ChangeScheduler();
|
||||
SetStatus(ThreadStatus::Ready);
|
||||
}
|
||||
|
||||
void Thread::CancelWait() {
|
||||
ASSERT(GetStatus() == ThreadStatus::WaitSynch);
|
||||
ClearWaitObjects();
|
||||
SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED);
|
||||
ResumeFromWait();
|
||||
}
|
||||
@@ -205,9 +196,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
|
||||
thread->name = std::move(name);
|
||||
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
|
||||
thread->owner_process = &owner_process;
|
||||
auto& scheduler = kernel.GlobalScheduler();
|
||||
scheduler.AddThread(thread);
|
||||
thread->tls_address = thread->owner_process->CreateTLSRegion();
|
||||
thread->scheduler = &system.Scheduler(processor_id);
|
||||
thread->scheduler->AddThread(thread);
|
||||
|
||||
thread->owner_process->RegisterThread(thread.get());
|
||||
|
||||
@@ -250,6 +241,22 @@ void Thread::SetStatus(ThreadStatus new_status) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (new_status) {
|
||||
case ThreadStatus::Ready:
|
||||
case ThreadStatus::Running:
|
||||
SetSchedulingStatus(ThreadSchedStatus::Runnable);
|
||||
break;
|
||||
case ThreadStatus::Dormant:
|
||||
SetSchedulingStatus(ThreadSchedStatus::None);
|
||||
break;
|
||||
case ThreadStatus::Dead:
|
||||
SetSchedulingStatus(ThreadSchedStatus::Exited);
|
||||
break;
|
||||
default:
|
||||
SetSchedulingStatus(ThreadSchedStatus::Paused);
|
||||
break;
|
||||
}
|
||||
|
||||
if (status == ThreadStatus::Running) {
|
||||
last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
|
||||
}
|
||||
@@ -311,8 +318,7 @@ void Thread::UpdatePriority() {
|
||||
return;
|
||||
}
|
||||
|
||||
scheduler->SetThreadPriority(this, new_priority);
|
||||
current_priority = new_priority;
|
||||
SetCurrentPriority(new_priority);
|
||||
|
||||
if (!lock_owner) {
|
||||
return;
|
||||
@@ -328,47 +334,7 @@ void Thread::UpdatePriority() {
|
||||
}
|
||||
|
||||
void Thread::ChangeCore(u32 core, u64 mask) {
|
||||
ideal_core = core;
|
||||
affinity_mask = mask;
|
||||
ChangeScheduler();
|
||||
}
|
||||
|
||||
void Thread::ChangeScheduler() {
|
||||
if (status != ThreadStatus::Ready) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto& system = Core::System::GetInstance();
|
||||
std::optional<s32> new_processor_id{GetNextProcessorId(affinity_mask)};
|
||||
|
||||
if (!new_processor_id) {
|
||||
new_processor_id = processor_id;
|
||||
}
|
||||
if (ideal_core != -1 && system.Scheduler(ideal_core).GetCurrentThread() == nullptr) {
|
||||
new_processor_id = ideal_core;
|
||||
}
|
||||
|
||||
ASSERT(*new_processor_id < 4);
|
||||
|
||||
// Add thread to new core's scheduler
|
||||
auto& next_scheduler = system.Scheduler(*new_processor_id);
|
||||
|
||||
if (*new_processor_id != processor_id) {
|
||||
// Remove thread from previous core's scheduler
|
||||
scheduler->RemoveThread(this);
|
||||
next_scheduler.AddThread(this);
|
||||
}
|
||||
|
||||
processor_id = *new_processor_id;
|
||||
|
||||
// If the thread was ready, unschedule from the previous core and schedule on the new core
|
||||
scheduler->UnscheduleThread(this, current_priority);
|
||||
next_scheduler.ScheduleThread(this, current_priority);
|
||||
|
||||
// Change thread's scheduler
|
||||
scheduler = &next_scheduler;
|
||||
|
||||
system.CpuCore(processor_id).PrepareReschedule();
|
||||
SetCoreAndAffinityMask(core, mask);
|
||||
}
|
||||
|
||||
bool Thread::AllWaitObjectsReady() const {
|
||||
@@ -388,10 +354,8 @@ void Thread::SetActivity(ThreadActivity value) {
|
||||
|
||||
if (value == ThreadActivity::Paused) {
|
||||
// Set status if not waiting
|
||||
if (status == ThreadStatus::Ready) {
|
||||
status = ThreadStatus::Paused;
|
||||
} else if (status == ThreadStatus::Running) {
|
||||
status = ThreadStatus::Paused;
|
||||
if (status == ThreadStatus::Ready || status == ThreadStatus::Running) {
|
||||
SetStatus(ThreadStatus::Paused);
|
||||
Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
|
||||
}
|
||||
} else if (status == ThreadStatus::Paused) {
|
||||
@@ -408,6 +372,170 @@ void Thread::Sleep(s64 nanoseconds) {
|
||||
WakeAfterDelay(nanoseconds);
|
||||
}
|
||||
|
||||
bool Thread::YieldSimple() {
|
||||
auto& scheduler = kernel.GlobalScheduler();
|
||||
return scheduler.YieldThread(this);
|
||||
}
|
||||
|
||||
bool Thread::YieldAndBalanceLoad() {
|
||||
auto& scheduler = kernel.GlobalScheduler();
|
||||
return scheduler.YieldThreadAndBalanceLoad(this);
|
||||
}
|
||||
|
||||
bool Thread::YieldAndWaitForLoadBalancing() {
|
||||
auto& scheduler = kernel.GlobalScheduler();
|
||||
return scheduler.YieldThreadAndWaitForLoadBalancing(this);
|
||||
}
|
||||
|
||||
void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
|
||||
const u32 old_flags = scheduling_state;
|
||||
scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
|
||||
static_cast<u32>(new_status);
|
||||
AdjustSchedulingOnStatus(old_flags);
|
||||
}
|
||||
|
||||
void Thread::SetCurrentPriority(u32 new_priority) {
|
||||
const u32 old_priority = std::exchange(current_priority, new_priority);
|
||||
AdjustSchedulingOnPriority(old_priority);
|
||||
}
|
||||
|
||||
ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
|
||||
const auto HighestSetCore = [](u64 mask, u32 max_cores) {
|
||||
for (s32 core = max_cores - 1; core >= 0; core--) {
|
||||
if (((mask >> core) & 1) != 0) {
|
||||
return core;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
};
|
||||
|
||||
const bool use_override = affinity_override_count != 0;
|
||||
if (new_core == THREADPROCESSORID_DONT_UPDATE) {
|
||||
new_core = use_override ? ideal_core_override : ideal_core;
|
||||
if ((new_affinity_mask & (1ULL << new_core)) == 0) {
|
||||
return ERR_INVALID_COMBINATION;
|
||||
}
|
||||
}
|
||||
if (use_override) {
|
||||
ideal_core_override = new_core;
|
||||
affinity_mask_override = new_affinity_mask;
|
||||
} else {
|
||||
const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
|
||||
ideal_core = new_core;
|
||||
if (old_affinity_mask != new_affinity_mask) {
|
||||
const s32 old_core = processor_id;
|
||||
if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
|
||||
if (ideal_core < 0) {
|
||||
processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
|
||||
} else {
|
||||
processor_id = ideal_core;
|
||||
}
|
||||
}
|
||||
AdjustSchedulingOnAffinity(old_affinity_mask, old_core);
|
||||
}
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
|
||||
if (old_flags == scheduling_state) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto& scheduler = kernel.GlobalScheduler();
|
||||
if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
|
||||
ThreadSchedStatus::Runnable) {
|
||||
// In this case the thread was running and is now pausing/exiting
|
||||
if (processor_id >= 0) {
|
||||
scheduler.Unschedule(current_priority, processor_id, this);
|
||||
}
|
||||
|
||||
for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
|
||||
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
|
||||
scheduler.Unsuggest(current_priority, static_cast<u32>(core), this);
|
||||
}
|
||||
}
|
||||
} else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
|
||||
// The thread was stopped and is now becoming runnable
|
||||
if (processor_id >= 0) {
|
||||
scheduler.Schedule(current_priority, processor_id, this);
|
||||
}
|
||||
|
||||
for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
|
||||
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
|
||||
scheduler.Suggest(current_priority, static_cast<u32>(core), this);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
scheduler.SetReselectionPending();
|
||||
}
|
||||
|
||||
void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
|
||||
if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
|
||||
return;
|
||||
}
|
||||
auto& scheduler = Core::System::GetInstance().GlobalScheduler();
|
||||
if (processor_id >= 0) {
|
||||
scheduler.Unschedule(old_priority, processor_id, this);
|
||||
}
|
||||
|
||||
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
|
||||
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
|
||||
scheduler.Unsuggest(old_priority, core, this);
|
||||
}
|
||||
}
|
||||
|
||||
// Add thread to the new priority queues.
|
||||
Thread* current_thread = GetCurrentThread();
|
||||
|
||||
if (processor_id >= 0) {
|
||||
if (current_thread == this) {
|
||||
scheduler.SchedulePrepend(current_priority, processor_id, this);
|
||||
} else {
|
||||
scheduler.Schedule(current_priority, processor_id, this);
|
||||
}
|
||||
}
|
||||
|
||||
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
|
||||
if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
|
||||
scheduler.Suggest(current_priority, core, this);
|
||||
}
|
||||
}
|
||||
|
||||
scheduler.SetReselectionPending();
|
||||
}
|
||||
|
||||
void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
|
||||
auto& scheduler = Core::System::GetInstance().GlobalScheduler();
|
||||
if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
|
||||
current_priority >= THREADPRIO_COUNT) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
|
||||
if (((old_affinity_mask >> core) & 1) != 0) {
|
||||
if (core == old_core) {
|
||||
scheduler.Unschedule(current_priority, core, this);
|
||||
} else {
|
||||
scheduler.Unsuggest(current_priority, core, this);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
|
||||
if (((affinity_mask >> core) & 1) != 0) {
|
||||
if (core == processor_id) {
|
||||
scheduler.Schedule(current_priority, core, this);
|
||||
} else {
|
||||
scheduler.Suggest(current_priority, core, this);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
scheduler.SetReselectionPending();
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
|
||||
@@ -75,6 +75,26 @@ enum class ThreadActivity : u32 {
|
||||
Paused = 1,
|
||||
};
|
||||
|
||||
enum class ThreadSchedStatus : u32 {
|
||||
None = 0,
|
||||
Paused = 1,
|
||||
Runnable = 2,
|
||||
Exited = 3,
|
||||
};
|
||||
|
||||
enum class ThreadSchedFlags : u32 {
|
||||
ProcessPauseFlag = 1 << 4,
|
||||
ThreadPauseFlag = 1 << 5,
|
||||
ProcessDebugPauseFlag = 1 << 6,
|
||||
KernelInitPauseFlag = 1 << 8,
|
||||
};
|
||||
|
||||
enum class ThreadSchedMasks : u32 {
|
||||
LowMask = 0x000f,
|
||||
HighMask = 0xfff0,
|
||||
ForcePauseMask = 0x0070,
|
||||
};
|
||||
|
||||
class Thread final : public WaitObject {
|
||||
public:
|
||||
using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
|
||||
@@ -278,6 +298,10 @@ public:
|
||||
return processor_id;
|
||||
}
|
||||
|
||||
void SetProcessorID(s32 new_core) {
|
||||
processor_id = new_core;
|
||||
}
|
||||
|
||||
Process* GetOwnerProcess() {
|
||||
return owner_process;
|
||||
}
|
||||
@@ -295,6 +319,9 @@ public:
|
||||
}
|
||||
|
||||
void ClearWaitObjects() {
|
||||
for (const auto& waiting_object : wait_objects) {
|
||||
waiting_object->RemoveWaitingThread(this);
|
||||
}
|
||||
wait_objects.clear();
|
||||
}
|
||||
|
||||
@@ -383,11 +410,47 @@ public:
|
||||
/// Sleeps this thread for the given amount of nanoseconds.
|
||||
void Sleep(s64 nanoseconds);
|
||||
|
||||
/// Yields this thread without rebalancing loads.
|
||||
bool YieldSimple();
|
||||
|
||||
/// Yields this thread and does a load rebalancing.
|
||||
bool YieldAndBalanceLoad();
|
||||
|
||||
/// Yields this thread and if the core is left idle, loads are rebalanced
|
||||
bool YieldAndWaitForLoadBalancing();
|
||||
|
||||
void IncrementYieldCount() {
|
||||
yield_count++;
|
||||
}
|
||||
|
||||
u64 GetYieldCount() const {
|
||||
return yield_count;
|
||||
}
|
||||
|
||||
ThreadSchedStatus GetSchedulingStatus() const {
|
||||
return static_cast<ThreadSchedStatus>(scheduling_state &
|
||||
static_cast<u32>(ThreadSchedMasks::LowMask));
|
||||
}
|
||||
|
||||
bool IsRunning() const {
|
||||
return is_running;
|
||||
}
|
||||
|
||||
void SetIsRunning(bool value) {
|
||||
is_running = value;
|
||||
}
|
||||
|
||||
private:
|
||||
explicit Thread(KernelCore& kernel);
|
||||
~Thread() override;
|
||||
|
||||
void ChangeScheduler();
|
||||
void SetSchedulingStatus(ThreadSchedStatus new_status);
|
||||
void SetCurrentPriority(u32 new_priority);
|
||||
ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
|
||||
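// Translate the high-level ThreadStatus into the scheduler's coarser ThreadSchedStatus.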
|
||||
void AdjustSchedulingOnStatus(u32 old_flags);
|
||||
void AdjustSchedulingOnPriority(u32 old_priority);
|
||||
void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core);
|
||||
|
||||
Core::ARM_Interface::ThreadContext context{};
|
||||
|
||||
@@ -409,6 +472,8 @@ private:
|
||||
|
||||
u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
|
||||
u64 last_running_ticks = 0; ///< CPU tick when thread was last running
|
||||
u64 yield_count = 0; ///< Number of redundant yields carried by this thread.
|
||||
///< A redundant yield is one where the scheduling state does not change
|
||||
|
||||
s32 processor_id = 0;
|
||||
|
||||
@@ -453,6 +518,13 @@ private:
|
||||
|
||||
ThreadActivity activity = ThreadActivity::Normal;
|
||||
|
||||
s32 ideal_core_override = -1;
|
||||
u64 affinity_mask_override = 0x1;
|
||||
u32 affinity_override_count = 0;
|
||||
|
||||
u32 scheduling_state = 0;
|
||||
bool is_running = false;
|
||||
|
||||
std::string name;
|
||||
};
|
||||
|
||||
|
||||
@@ -23,6 +23,8 @@ SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_
|
||||
transfer_memory->owner_permissions = permissions;
|
||||
transfer_memory->owner_process = kernel.CurrentProcess();
|
||||
|
||||
transfer_memory->MapMemory(base_address, size, permissions);
|
||||
|
||||
return transfer_memory;
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,9 @@
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_cpu.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
@@ -48,17 +51,8 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() const {
|
||||
if (ShouldWait(thread.get()))
|
||||
continue;
|
||||
|
||||
// A thread is ready to run if it's either in ThreadStatus::WaitSynch
|
||||
// and the rest of the objects it is waiting on are ready.
|
||||
bool ready_to_run = true;
|
||||
if (thread_status == ThreadStatus::WaitSynch) {
|
||||
ready_to_run = thread->AllWaitObjectsReady();
|
||||
}
|
||||
|
||||
if (ready_to_run) {
|
||||
candidate = thread.get();
|
||||
candidate_priority = thread->GetPriority();
|
||||
}
|
||||
candidate = thread.get();
|
||||
candidate_priority = thread->GetPriority();
|
||||
}
|
||||
|
||||
return candidate;
|
||||
@@ -82,9 +76,6 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
|
||||
|
||||
const std::size_t index = thread->GetWaitObjectIndex(this);
|
||||
|
||||
for (const auto& object : thread->GetWaitObjects()) {
|
||||
object->RemoveWaitingThread(thread.get());
|
||||
}
|
||||
thread->ClearWaitObjects();
|
||||
|
||||
thread->CancelWakeupTimer();
|
||||
@@ -95,6 +86,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
|
||||
}
|
||||
if (resume) {
|
||||
thread->ResumeFromWait();
|
||||
Core::System::GetInstance().PrepareReschedule(thread->GetProcessorID());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -847,17 +847,16 @@ private:
|
||||
void PopInteractiveOutData(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_AM, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
|
||||
|
||||
const auto storage = applet->GetBroker().PopInteractiveDataToGame();
|
||||
if (storage == nullptr) {
|
||||
LOG_ERROR(Service_AM,
|
||||
"storage is a nullptr. There is no data in the current interactive channel");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(ERR_NO_DATA_IN_CHANNEL);
|
||||
return;
|
||||
}
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.PushIpcInterface<IStorage>(std::move(*storage));
|
||||
}
|
||||
|
||||
@@ -27,9 +27,9 @@ AppletDataBroker::AppletDataBroker(Kernel::KernelCore& kernel) {
|
||||
state_changed_event = Kernel::WritableEvent::CreateEventPair(
|
||||
kernel, Kernel::ResetType::Manual, "ILibraryAppletAccessor:StateChangedEvent");
|
||||
pop_out_data_event = Kernel::WritableEvent::CreateEventPair(
|
||||
kernel, Kernel::ResetType::Manual, "ILibraryAppletAccessor:PopDataOutEvent");
|
||||
kernel, Kernel::ResetType::Automatic, "ILibraryAppletAccessor:PopDataOutEvent");
|
||||
pop_interactive_out_data_event = Kernel::WritableEvent::CreateEventPair(
|
||||
kernel, Kernel::ResetType::Manual, "ILibraryAppletAccessor:PopInteractiveDataOutEvent");
|
||||
kernel, Kernel::ResetType::Automatic, "ILibraryAppletAccessor:PopInteractiveDataOutEvent");
|
||||
}
|
||||
|
||||
AppletDataBroker::~AppletDataBroker() = default;
|
||||
|
||||
@@ -91,6 +91,7 @@ void SoftwareKeyboard::ExecuteInteractive() {
|
||||
|
||||
if (status == INTERACTIVE_STATUS_OK) {
|
||||
complete = true;
|
||||
broker.SignalStateChanged();
|
||||
} else {
|
||||
std::array<char16_t, SWKBD_OUTPUT_INTERACTIVE_BUFFER_SIZE / 2 - 2> string;
|
||||
std::memcpy(string.data(), data.data() + 4, string.size() * 2);
|
||||
|
||||
@@ -40,7 +40,10 @@ static FileSys::VirtualDir GetDirectoryRelativeWrapped(FileSys::VirtualDir base,
|
||||
if (dir_name.empty() || dir_name == "." || dir_name == "/" || dir_name == "\\")
|
||||
return base;
|
||||
|
||||
return base->GetDirectoryRelative(dir_name);
|
||||
const auto res = base->GetDirectoryRelative(dir_name);
|
||||
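// If the directory does not exist yet, create it instead of returning nullptr.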
if (res == nullptr)
|
||||
return base->CreateDirectoryRelative(dir_name);
|
||||
return res;
|
||||
}
|
||||
|
||||
VfsDirectoryServiceWrapper::VfsDirectoryServiceWrapper(FileSys::VirtualDir backing_)
|
||||
|
||||
@@ -203,13 +203,13 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
|
||||
{120, &Hid::SetNpadJoyHoldType, "SetNpadJoyHoldType"},
|
||||
{121, &Hid::GetNpadJoyHoldType, "GetNpadJoyHoldType"},
|
||||
{122, &Hid::SetNpadJoyAssignmentModeSingleByDefault, "SetNpadJoyAssignmentModeSingleByDefault"},
|
||||
{123, nullptr, "SetNpadJoyAssignmentModeSingleByDefault"},
|
||||
{123, &Hid::SetNpadJoyAssignmentModeSingle, "SetNpadJoyAssignmentModeSingle"},
|
||||
{124, &Hid::SetNpadJoyAssignmentModeDual, "SetNpadJoyAssignmentModeDual"},
|
||||
{125, &Hid::MergeSingleJoyAsDualJoy, "MergeSingleJoyAsDualJoy"},
|
||||
{126, &Hid::StartLrAssignmentMode, "StartLrAssignmentMode"},
|
||||
{127, &Hid::StopLrAssignmentMode, "StopLrAssignmentMode"},
|
||||
{128, &Hid::SetNpadHandheldActivationMode, "SetNpadHandheldActivationMode"},
|
||||
{129, nullptr, "GetNpadHandheldActivationMode"},
|
||||
{129, &Hid::GetNpadHandheldActivationMode, "GetNpadHandheldActivationMode"},
|
||||
{130, &Hid::SwapNpadAssignment, "SwapNpadAssignment"},
|
||||
{131, nullptr, "IsUnintendedHomeButtonInputProtectionEnabled"},
|
||||
{132, nullptr, "EnableUnintendedHomeButtonInputProtection"},
|
||||
@@ -557,10 +557,126 @@ void Hid::SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx
|
||||
LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}", npad_id,
|
||||
applet_resource_user_id);
|
||||
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
controller.SetNpadMode(npad_id, Controller_NPad::NPadAssignments::Single);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::SetNpadJoyAssignmentModeSingle(Kernel::HLERequestContext& ctx) {
|
||||
// TODO: Check the differences between this and SetNpadJoyAssignmentModeSingleByDefault
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto npad_id{rp.Pop<u32>()};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
const auto npad_joy_device_type{rp.Pop<u64>()};
|
||||
|
||||
LOG_WARNING(Service_HID,
|
||||
"(STUBBED) called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}",
|
||||
npad_id, applet_resource_user_id, npad_joy_device_type);
|
||||
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
controller.SetNpadMode(npad_id, Controller_NPad::NPadAssignments::Single);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto npad_id{rp.Pop<u32>()};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}", npad_id,
|
||||
applet_resource_user_id);
|
||||
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
controller.SetNpadMode(npad_id, Controller_NPad::NPadAssignments::Dual);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::MergeSingleJoyAsDualJoy(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto unknown_1{rp.Pop<u32>()};
|
||||
const auto unknown_2{rp.Pop<u32>()};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_WARNING(Service_HID,
|
||||
"(STUBBED) called, unknown_1={}, unknown_2={}, applet_resource_user_id={}",
|
||||
unknown_1, unknown_2, applet_resource_user_id);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::StartLrAssignmentMode(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
controller.StartLRAssignmentMode();
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::StopLrAssignmentMode(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
controller.StopLRAssignmentMode();
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
const auto mode{rp.Pop<u64>()};
|
||||
|
||||
LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}, mode={}",
|
||||
applet_resource_user_id, mode);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::GetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
|
||||
applet_resource_user_id);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::SwapNpadAssignment(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto npad_1{rp.Pop<u32>()};
|
||||
const auto npad_2{rp.Pop<u32>()};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, npad_1={}, npad_2={}",
|
||||
applet_resource_user_id, npad_1, npad_2);
|
||||
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
if (controller.SwapNpadAssignment(npad_1, npad_2)) {
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
} else {
|
||||
LOG_ERROR(Service_HID, "Npads are not connected!");
|
||||
rb.Push(ERR_NPAD_NOT_CONNECTED);
|
||||
}
|
||||
}
|
||||
|
||||
void Hid::BeginPermitVibrationSession(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
@@ -635,47 +751,6 @@ void Hid::GetActualVibrationValue(Kernel::HLERequestContext& ctx) {
|
||||
applet_resource->GetController<Controller_NPad>(HidController::NPad).GetLastVibration());
|
||||
}
|
||||
|
||||
void Hid::SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto npad_id{rp.Pop<u32>()};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_DEBUG(Service_HID, "called, npad_id={}, applet_resource_user_id={}", npad_id,
|
||||
applet_resource_user_id);
|
||||
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
controller.SetNpadMode(npad_id, Controller_NPad::NPadAssignments::Dual);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::MergeSingleJoyAsDualJoy(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto unknown_1{rp.Pop<u32>()};
|
||||
const auto unknown_2{rp.Pop<u32>()};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_WARNING(Service_HID,
|
||||
"(STUBBED) called, unknown_1={}, unknown_2={}, applet_resource_user_id={}",
|
||||
unknown_1, unknown_2, applet_resource_user_id);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
const auto mode{rp.Pop<u64>()};
|
||||
|
||||
LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}, mode={}",
|
||||
applet_resource_user_id, mode);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_HID, "called");
|
||||
|
||||
@@ -769,49 +844,6 @@ void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) {
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::StartLrAssignmentMode(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
controller.StartLRAssignmentMode();
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::StopLrAssignmentMode(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
controller.StopLRAssignmentMode();
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void Hid::SwapNpadAssignment(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto npad_1{rp.Pop<u32>()};
|
||||
const auto npad_2{rp.Pop<u32>()};
|
||||
const auto applet_resource_user_id{rp.Pop<u64>()};
|
||||
|
||||
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, npad_1={}, npad_2={}",
|
||||
applet_resource_user_id, npad_1, npad_2);
|
||||
|
||||
auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad);
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
if (controller.SwapNpadAssignment(npad_1, npad_2)) {
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
} else {
|
||||
LOG_ERROR(Service_HID, "Npads are not connected!");
|
||||
rb.Push(ERR_NPAD_NOT_CONNECTED);
|
||||
}
|
||||
}
|
||||
|
||||
class HidDbg final : public ServiceFramework<HidDbg> {
|
||||
public:
|
||||
explicit HidDbg() : ServiceFramework{"hid:dbg"} {
|
||||
|
||||
@@ -106,14 +106,19 @@ private:
|
||||
void SetNpadJoyHoldType(Kernel::HLERequestContext& ctx);
|
||||
void GetNpadJoyHoldType(Kernel::HLERequestContext& ctx);
|
||||
void SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx);
|
||||
void SetNpadJoyAssignmentModeSingle(Kernel::HLERequestContext& ctx);
|
||||
void SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx);
|
||||
void MergeSingleJoyAsDualJoy(Kernel::HLERequestContext& ctx);
|
||||
void StartLrAssignmentMode(Kernel::HLERequestContext& ctx);
|
||||
void StopLrAssignmentMode(Kernel::HLERequestContext& ctx);
|
||||
void SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx);
|
||||
void GetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx);
|
||||
void SwapNpadAssignment(Kernel::HLERequestContext& ctx);
|
||||
void BeginPermitVibrationSession(Kernel::HLERequestContext& ctx);
|
||||
void EndPermitVibrationSession(Kernel::HLERequestContext& ctx);
|
||||
void SendVibrationValue(Kernel::HLERequestContext& ctx);
|
||||
void SendVibrationValues(Kernel::HLERequestContext& ctx);
|
||||
void GetActualVibrationValue(Kernel::HLERequestContext& ctx);
|
||||
void SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx);
|
||||
void MergeSingleJoyAsDualJoy(Kernel::HLERequestContext& ctx);
|
||||
void SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx);
|
||||
void GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx);
|
||||
void CreateActiveVibrationDeviceList(Kernel::HLERequestContext& ctx);
|
||||
void PermitVibration(Kernel::HLERequestContext& ctx);
|
||||
@@ -123,9 +128,6 @@ private:
|
||||
void StopSixAxisSensor(Kernel::HLERequestContext& ctx);
|
||||
void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx);
|
||||
void SetPalmaBoostMode(Kernel::HLERequestContext& ctx);
|
||||
void StartLrAssignmentMode(Kernel::HLERequestContext& ctx);
|
||||
void StopLrAssignmentMode(Kernel::HLERequestContext& ctx);
|
||||
void SwapNpadAssignment(Kernel::HLERequestContext& ctx);
|
||||
|
||||
std::shared_ptr<IAppletResource> applet_resource;
|
||||
Core::System& system;
|
||||
|
||||
@@ -10,6 +10,8 @@
|
||||
#include "core/hle/service/lbl/lbl.h"
|
||||
#include "core/hle/service/service.h"
|
||||
#include "core/hle/service/sm/sm.h"
|
||||
#include "core/settings.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Service::LBL {
|
||||
|
||||
@@ -18,21 +20,21 @@ public:
|
||||
explicit LBL() : ServiceFramework{"lbl"} {
|
||||
// clang-format off
|
||||
static const FunctionInfo functions[] = {
|
||||
{0, nullptr, "SaveCurrentSetting"},
|
||||
{1, nullptr, "LoadCurrentSetting"},
|
||||
{2, nullptr, "SetCurrentBrightnessSetting"},
|
||||
{3, nullptr, "GetCurrentBrightnessSetting"},
|
||||
{4, nullptr, "ApplyCurrentBrightnessSettingToBacklight"},
|
||||
{5, nullptr, "GetBrightnessSettingAppliedToBacklight"},
|
||||
{6, nullptr, "SwitchBacklightOn"},
|
||||
{7, nullptr, "SwitchBacklightOff"},
|
||||
{8, nullptr, "GetBacklightSwitchStatus"},
|
||||
{9, nullptr, "EnableDimming"},
|
||||
{10, nullptr, "DisableDimming"},
|
||||
{11, nullptr, "IsDimmingEnabled"},
|
||||
{12, nullptr, "EnableAutoBrightnessControl"},
|
||||
{13, nullptr, "DisableAutoBrightnessControl"},
|
||||
{14, nullptr, "IsAutoBrightnessControlEnabled"},
|
||||
{0, &LBL::SaveCurrentSetting, "SaveCurrentSetting"},
|
||||
{1, &LBL::LoadCurrentSetting, "LoadCurrentSetting"},
|
||||
{2, &LBL::SetCurrentBrightnessSetting, "SetCurrentBrightnessSetting"},
|
||||
{3, &LBL::GetCurrentBrightnessSetting, "GetCurrentBrightnessSetting"},
|
||||
{4, &LBL::ApplyCurrentBrightnessSettingToBacklight, "ApplyCurrentBrightnessSettingToBacklight"},
|
||||
{5, &LBL::GetBrightnessSettingAppliedToBacklight, "GetBrightnessSettingAppliedToBacklight"},
|
||||
{6, &LBL::SwitchBacklightOn, "SwitchBacklightOn"},
|
||||
{7, &LBL::SwitchBacklightOff, "SwitchBacklightOff"},
|
||||
{8, &LBL::GetBacklightSwitchStatus, "GetBacklightSwitchStatus"},
|
||||
{9, &LBL::EnableDimming, "EnableDimming"},
|
||||
{10, &LBL::DisableDimming, "DisableDimming"},
|
||||
{11, &LBL::IsDimmingEnabled, "IsDimmingEnabled"},
|
||||
{12, &LBL::EnableAutoBrightnessControl, "EnableAutoBrightnessControl"},
|
||||
{13, &LBL::DisableAutoBrightnessControl, "DisableAutoBrightnessControl"},
|
||||
{14, &LBL::IsAutoBrightnessControlEnabled, "IsAutoBrightnessControlEnabled"},
|
||||
{15, nullptr, "SetAmbientLightSensorValue"},
|
||||
{16, nullptr, "GetAmbientLightSensorValue"},
|
||||
{17, nullptr, "SetBrightnessReflectionDelayLevel"},
|
||||
@@ -42,8 +44,8 @@ public:
|
||||
{21, nullptr, "SetCurrentAmbientLightSensorMapping"},
|
||||
{22, nullptr, "GetCurrentAmbientLightSensorMapping"},
|
||||
{23, nullptr, "IsAmbientLightSensorAvailable"},
|
||||
{24, nullptr, "SetCurrentBrightnessSettingForVrMode"},
|
||||
{25, nullptr, "GetCurrentBrightnessSettingForVrMode"},
|
||||
{24, &LBL::SetCurrentBrightnessSettingForVrMode, "SetCurrentBrightnessSettingForVrMode"},
|
||||
{25, &LBL::GetCurrentBrightnessSettingForVrMode, "GetCurrentBrightnessSettingForVrMode"},
|
||||
{26, &LBL::EnableVrMode, "EnableVrMode"},
|
||||
{27, &LBL::DisableVrMode, "DisableVrMode"},
|
||||
{28, &LBL::IsVrModeEnabled, "IsVrModeEnabled"},
|
||||
@@ -53,13 +55,209 @@ public:
|
||||
RegisterHandlers(functions);
|
||||
}
|
||||
|
||||
void LoadFromSettings() {
|
||||
current_brightness = Settings::values.backlight_brightness;
|
||||
current_vr_mode_brightness = Settings::values.backlight_brightness;
|
||||
|
||||
if (auto_brightness_enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (vr_mode_enabled) {
|
||||
Renderer().SetCurrentBrightness(current_vr_mode_brightness);
|
||||
} else {
|
||||
Renderer().SetCurrentBrightness(current_brightness);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
f32 GetAutoBrightnessValue() const {
|
||||
return 0.5f;
|
||||
}
|
||||
|
||||
VideoCore::RendererBase& Renderer() {
|
||||
return Core::System::GetInstance().Renderer();
|
||||
}
|
||||
|
||||
void SaveCurrentSetting(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
Settings::values.backlight_brightness = current_brightness;
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void LoadCurrentSetting(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
LoadFromSettings();
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void SetCurrentBrightnessSetting(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto value = rp.PopRaw<f32>();
|
||||
|
||||
LOG_DEBUG(Service_LBL, "called, value={:.3f}", value);
|
||||
|
||||
current_brightness = std::clamp(value, 0.0f, 1.0f);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void GetCurrentBrightnessSetting(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push(current_brightness);
|
||||
}
|
||||
|
||||
void ApplyCurrentBrightnessSettingToBacklight(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
if (!auto_brightness_enabled) {
|
||||
Renderer().SetCurrentBrightness(vr_mode_enabled ? current_vr_mode_brightness
|
||||
: current_brightness);
|
||||
}
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void GetBrightnessSettingAppliedToBacklight(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push(Renderer().GetCurrentResultantBrightness());
|
||||
}
|
||||
|
||||
void SwitchBacklightOn(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto fade_time = rp.PopRaw<u64>();
|
||||
|
||||
LOG_DEBUG(Service_LBL, "called, fade_time={:016X}", fade_time);
|
||||
|
||||
Renderer().SetBacklightStatus(true, fade_time);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void SwitchBacklightOff(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto fade_time = rp.PopRaw<u64>();
|
||||
|
||||
LOG_DEBUG(Service_LBL, "called, fade_time={:016X}", fade_time);
|
||||
|
||||
Renderer().SetBacklightStatus(false, fade_time);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void GetBacklightSwitchStatus(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push<u8>(Renderer().GetBacklightStatus());
|
||||
}
|
||||
|
||||
void EnableDimming(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
dimming_enabled = true;
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void DisableDimming(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
dimming_enabled = false;
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void IsDimmingEnabled(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push<u8>(dimming_enabled);
|
||||
}
|
||||
|
||||
void EnableAutoBrightnessControl(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
auto_brightness_enabled = true;
|
||||
Renderer().SetCurrentBrightness(GetAutoBrightnessValue());
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void DisableAutoBrightnessControl(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
auto_brightness_enabled = false;
|
||||
Renderer().SetCurrentBrightness(current_brightness);
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void IsAutoBrightnessControlEnabled(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push<u8>(auto_brightness_enabled);
|
||||
}
|
||||
|
||||
void SetCurrentBrightnessSettingForVrMode(Kernel::HLERequestContext& ctx) {
|
||||
IPC::RequestParser rp{ctx};
|
||||
const auto value = rp.PopRaw<f32>();
|
||||
|
||||
LOG_DEBUG(Service_LBL, "called, value={:.3f}", value);
|
||||
|
||||
current_vr_mode_brightness = std::clamp(value, 0.0f, 1.0f);
|
||||
|
||||
if (vr_mode_enabled && !auto_brightness_enabled) {
|
||||
Renderer().SetCurrentBrightness(value);
|
||||
}
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
}
|
||||
|
||||
void GetCurrentBrightnessSettingForVrMode(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push(current_vr_mode_brightness);
|
||||
}
|
||||
|
||||
void EnableVrMode(Kernel::HLERequestContext& ctx) {
|
||||
LOG_DEBUG(Service_LBL, "called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
|
||||
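// Only push the VR brightness to the renderer when entering VR mode actually changes the applied value.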
if (!vr_mode_enabled && !auto_brightness_enabled &&
|
||||
current_brightness != current_vr_mode_brightness) {
|
||||
Renderer().SetCurrentBrightness(current_vr_mode_brightness);
|
||||
}
|
||||
|
||||
vr_mode_enabled = true;
|
||||
}
|
||||
|
||||
@@ -69,6 +267,11 @@ private:
|
||||
IPC::ResponseBuilder rb{ctx, 2};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
|
||||
if (vr_mode_enabled && !auto_brightness_enabled &&
|
||||
current_brightness != current_vr_mode_brightness) {
|
||||
Renderer().SetCurrentBrightness(current_brightness);
|
||||
}
|
||||
|
||||
vr_mode_enabled = false;
|
||||
}
|
||||
|
||||
@@ -80,9 +283,27 @@ private:
|
||||
rb.Push(vr_mode_enabled);
|
||||
}
|
||||
|
||||
bool auto_brightness_enabled = false;
|
||||
bool dimming_enabled = true;
|
||||
|
||||
f32 current_brightness = GetAutoBrightnessValue();
|
||||
f32 current_vr_mode_brightness = GetAutoBrightnessValue();
|
||||
|
||||
bool vr_mode_enabled = false;
|
||||
};
|
||||
|
||||
void RequestLoadCurrentSetting(SM::ServiceManager& sm) {
|
||||
if (&sm == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
const auto lbl = sm.GetService<LBL>("lbl");
|
||||
|
||||
if (lbl) {
|
||||
lbl->LoadFromSettings();
|
||||
}
|
||||
}
|
||||
|
||||
void InstallInterfaces(SM::ServiceManager& sm) {
|
||||
std::make_shared<LBL>()->InstallAsService(sm);
|
||||
}
|
||||
|
||||
@@ -10,6 +10,9 @@ class ServiceManager;
|
||||
|
||||
namespace Service::LBL {
|
||||
|
||||
// Asks the LBL service registered with the given ServiceManager to load brightness values from Settings
|
||||
void RequestLoadCurrentSetting(SM::ServiceManager& sm);
|
||||
|
||||
void InstallInterfaces(SM::ServiceManager& sm);
|
||||
|
||||
} // namespace Service::LBL
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#include "core/hle/kernel/writable_event.h"
|
||||
#include "core/hle/service/nifm/nifm.h"
|
||||
#include "core/hle/service/service.h"
|
||||
#include "core/settings.h"
|
||||
|
||||
namespace Service::NIFM {
|
||||
|
||||
@@ -88,7 +89,12 @@ private:
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.PushEnum(RequestState::Connected);
|
||||
|
||||
if (Settings::values.bcat_backend == "none") {
|
||||
rb.PushEnum(RequestState::NotSubmitted);
|
||||
} else {
|
||||
rb.PushEnum(RequestState::Connected);
|
||||
}
|
||||
}
|
||||
|
||||
void GetResult(Kernel::HLERequestContext& ctx) {
|
||||
@@ -196,14 +202,22 @@ private:
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push<u8>(1);
|
||||
if (Settings::values.bcat_backend == "none") {
|
||||
rb.Push<u8>(0);
|
||||
} else {
|
||||
rb.Push<u8>(1);
|
||||
}
|
||||
}
|
||||
void IsAnyInternetRequestAccepted(Kernel::HLERequestContext& ctx) {
|
||||
LOG_WARNING(Service_NIFM, "(STUBBED) called");
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 3};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push<u8>(1);
|
||||
if (Settings::values.bcat_backend == "none") {
|
||||
rb.Push<u8>(0);
|
||||
} else {
|
||||
rb.Push<u8>(1);
|
||||
}
|
||||
}
|
||||
Core::System& system;
|
||||
};
|
||||
|
||||
@@ -6,13 +6,6 @@
|
||||
#include <cstring>
|
||||
#include <vector>
|
||||
|
||||
#include <FontChineseSimplified.h>
|
||||
#include <FontChineseTraditional.h>
|
||||
#include <FontExtendedChineseSimplified.h>
|
||||
#include <FontKorean.h>
|
||||
#include <FontNintendoExtended.h>
|
||||
#include <FontStandard.h>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_paths.h"
|
||||
#include "common/common_types.h"
|
||||
@@ -24,7 +17,9 @@
|
||||
#include "core/file_sys/nca_metadata.h"
|
||||
#include "core/file_sys/registered_cache.h"
|
||||
#include "core/file_sys/romfs.h"
|
||||
#include "core/file_sys/system_archive/system_archive.h"
|
||||
#include "core/hle/ipc_helpers.h"
|
||||
#include "core/hle/kernel/physical_memory.h"
|
||||
#include "core/hle/kernel/shared_memory.h"
|
||||
#include "core/hle/service/filesystem/filesystem.h"
|
||||
#include "core/hle/service/ns/pl_u.h"
|
||||
@@ -94,15 +89,20 @@ static void DecryptSharedFont(const std::vector<u32>& input, Kernel::PhysicalMem
|
||||
offset += transformed_font.size() * sizeof(u32);
|
||||
}
|
||||
|
||||
static void EncryptSharedFont(const std::vector<u8>& input, Kernel::PhysicalMemory& output,
|
||||
std::size_t& offset) {
|
||||
ASSERT_MSG(offset + input.size() + 8 < SHARED_FONT_MEM_SIZE, "Shared fonts exceeds 17mb!");
|
||||
const u32 KEY = EXPECTED_MAGIC ^ EXPECTED_RESULT;
|
||||
std::memcpy(output.data() + offset, &EXPECTED_RESULT, sizeof(u32)); // Magic header
|
||||
const u32 ENC_SIZE = static_cast<u32>(input.size()) ^ KEY;
|
||||
std::memcpy(output.data() + offset + sizeof(u32), &ENC_SIZE, sizeof(u32));
|
||||
std::memcpy(output.data() + offset + (sizeof(u32) * 2), input.data(), input.size());
|
||||
offset += input.size() + (sizeof(u32) * 2);
|
||||
void EncryptSharedFont(const std::vector<u32>& input, std::vector<u8>& output,
|
||||
std::size_t& offset) {
|
||||
ASSERT_MSG(offset + (input.size() * sizeof(u32)) < SHARED_FONT_MEM_SIZE,
|
||||
"Shared fonts exceeds 17mb!");
|
||||
|
||||
const auto key = Common::swap32(EXPECTED_RESULT ^ EXPECTED_MAGIC);
|
||||
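// Word 0 holds the magic, word 1 the XOR-encrypted payload size; the font data follows, XORed word-by-word with the key.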
std::vector<u32> transformed_font(input.size() + 2);
|
||||
transformed_font[0] = Common::swap32(EXPECTED_MAGIC);
|
||||
transformed_font[1] = Common::swap32(input.size() * sizeof(u32)) ^ key;
|
||||
std::transform(input.begin(), input.end(), transformed_font.begin() + 2,
|
||||
[key](u32 in) { return in ^ key; });
|
||||
std::memcpy(output.data() + offset, transformed_font.data(),
|
||||
transformed_font.size() * sizeof(u32));
|
||||
offset += transformed_font.size() * sizeof(u32);
|
||||
}
|
||||
|
||||
// Helper function to make BuildSharedFontsRawRegions a bit nicer
|
||||
@@ -168,114 +168,49 @@ PL_U::PL_U(Core::System& system)
|
||||
// Attempt to load shared font data from disk
|
||||
const auto* nand = fsc.GetSystemNANDContents();
|
||||
std::size_t offset = 0;
|
||||
// Rebuild shared fonts from data ncas
|
||||
if (nand->HasEntry(static_cast<u64>(FontArchives::Standard),
|
||||
FileSys::ContentRecordType::Data)) {
|
||||
impl->shared_font = std::make_shared<Kernel::PhysicalMemory>(SHARED_FONT_MEM_SIZE);
|
||||
for (auto font : SHARED_FONTS) {
|
||||
const auto nca =
|
||||
nand->GetEntry(static_cast<u64>(font.first), FileSys::ContentRecordType::Data);
|
||||
if (!nca) {
|
||||
LOG_ERROR(Service_NS, "Failed to find {:016X}! Skipping",
|
||||
static_cast<u64>(font.first));
|
||||
continue;
|
||||
}
|
||||
const auto romfs = nca->GetRomFS();
|
||||
if (!romfs) {
|
||||
LOG_ERROR(Service_NS, "{:016X} has no RomFS! Skipping",
|
||||
static_cast<u64>(font.first));
|
||||
continue;
|
||||
}
|
||||
const auto extracted_romfs = FileSys::ExtractRomFS(romfs);
|
||||
if (!extracted_romfs) {
|
||||
LOG_ERROR(Service_NS, "Failed to extract RomFS for {:016X}! Skipping",
|
||||
static_cast<u64>(font.first));
|
||||
continue;
|
||||
}
|
||||
const auto font_fp = extracted_romfs->GetFile(font.second);
|
||||
if (!font_fp) {
|
||||
LOG_ERROR(Service_NS, "{:016X} has no file \"{}\"! Skipping",
|
||||
static_cast<u64>(font.first), font.second);
|
||||
continue;
|
||||
}
|
||||
std::vector<u32> font_data_u32(font_fp->GetSize() / sizeof(u32));
|
||||
font_fp->ReadBytes<u32>(font_data_u32.data(), font_fp->GetSize());
|
||||
// We need to be BigEndian as u32s for the xor encryption
|
||||
std::transform(font_data_u32.begin(), font_data_u32.end(), font_data_u32.begin(),
|
||||
Common::swap32);
|
||||
FontRegion region{
|
||||
static_cast<u32>(offset + 8),
|
||||
static_cast<u32>((font_data_u32.size() * sizeof(u32)) -
|
||||
8)}; // Font offset and size do not account for the header
|
||||
DecryptSharedFont(font_data_u32, *impl->shared_font, offset);
|
||||
impl->shared_font_regions.push_back(region);
|
||||
// Rebuild shared fonts from data NCAs or synthesize
|
||||
|
||||
impl->shared_font = std::make_shared<Kernel::PhysicalMemory>(SHARED_FONT_MEM_SIZE);
|
||||
for (auto font : SHARED_FONTS) {
|
||||
FileSys::VirtualFile romfs;
|
||||
const auto nca =
|
||||
nand->GetEntry(static_cast<u64>(font.first), FileSys::ContentRecordType::Data);
|
||||
if (nca) {
|
||||
romfs = nca->GetRomFS();
|
||||
}
|
||||
|
||||
} else {
|
||||
impl->shared_font = std::make_shared<Kernel::PhysicalMemory>(
|
||||
SHARED_FONT_MEM_SIZE); // Shared memory needs to always be allocated and a fixed size
|
||||
|
||||
const std::string user_path = FileUtil::GetUserPath(FileUtil::UserPath::SysDataDir);
|
||||
const std::string filepath{user_path + SHARED_FONT};
|
||||
|
||||
// Create path if not already created
|
||||
if (!FileUtil::CreateFullPath(filepath)) {
|
||||
LOG_ERROR(Service_NS, "Failed to create sharedfonts path \"{}\"!", filepath);
|
||||
return;
|
||||
if (!romfs) {
|
||||
romfs = FileSys::SystemArchive::SynthesizeSystemArchive(static_cast<u64>(font.first));
|
||||
}
|
||||
|
||||
bool using_ttf = false;
|
||||
for (const char* font_ttf : SHARED_FONTS_TTF) {
|
||||
if (FileUtil::Exists(user_path + font_ttf)) {
|
||||
using_ttf = true;
|
||||
FileUtil::IOFile file(user_path + font_ttf, "rb");
|
||||
if (file.IsOpen()) {
|
||||
std::vector<u8> ttf_bytes(file.GetSize());
|
||||
file.ReadBytes<u8>(ttf_bytes.data(), ttf_bytes.size());
|
||||
FontRegion region{
|
||||
static_cast<u32>(offset + 8),
|
||||
static_cast<u32>(ttf_bytes.size())}; // Font offset and size do not account
|
||||
// for the header
|
||||
EncryptSharedFont(ttf_bytes, *impl->shared_font, offset);
|
||||
impl->shared_font_regions.push_back(region);
|
||||
} else {
|
||||
LOG_WARNING(Service_NS, "Unable to load font: {}", font_ttf);
|
||||
}
|
||||
} else if (using_ttf) {
|
||||
LOG_WARNING(Service_NS, "Unable to find font: {}", font_ttf);
|
||||
}
|
||||
if (!romfs) {
|
||||
LOG_ERROR(Service_NS, "Failed to find or synthesize {:016X}! Skipping",
|
||||
static_cast<u64>(font.first));
|
||||
continue;
|
||||
}
|
||||
if (using_ttf)
|
||||
return;
|
||||
FileUtil::IOFile file(filepath, "rb");
|
||||
|
||||
if (file.IsOpen()) {
|
||||
// Read shared font data
|
||||
ASSERT(file.GetSize() == SHARED_FONT_MEM_SIZE);
|
||||
file.ReadBytes(impl->shared_font->data(), impl->shared_font->size());
|
||||
impl->BuildSharedFontsRawRegions(*impl->shared_font);
|
||||
} else {
|
||||
LOG_WARNING(Service_NS,
|
||||
"Shared Font file missing. Loading open source replacement from memory");
|
||||
|
||||
// clang-format off
|
||||
const std::vector<std::vector<u8>> open_source_shared_fonts_ttf = {
|
||||
{std::begin(FontChineseSimplified), std::end(FontChineseSimplified)},
|
||||
{std::begin(FontChineseTraditional), std::end(FontChineseTraditional)},
|
||||
{std::begin(FontExtendedChineseSimplified), std::end(FontExtendedChineseSimplified)},
|
||||
{std::begin(FontKorean), std::end(FontKorean)},
|
||||
{std::begin(FontNintendoExtended), std::end(FontNintendoExtended)},
|
||||
{std::begin(FontStandard), std::end(FontStandard)},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
for (const std::vector<u8>& font_ttf : open_source_shared_fonts_ttf) {
|
||||
const FontRegion region{static_cast<u32>(offset + 8),
|
||||
static_cast<u32>(font_ttf.size())};
|
||||
EncryptSharedFont(font_ttf, *impl->shared_font, offset);
|
||||
impl->shared_font_regions.push_back(region);
|
||||
}
|
||||
const auto extracted_romfs = FileSys::ExtractRomFS(romfs);
|
||||
if (!extracted_romfs) {
|
||||
LOG_ERROR(Service_NS, "Failed to extract RomFS for {:016X}! Skipping",
|
||||
static_cast<u64>(font.first));
|
||||
continue;
|
||||
}
|
||||
const auto font_fp = extracted_romfs->GetFile(font.second);
|
||||
if (!font_fp) {
|
||||
LOG_ERROR(Service_NS, "{:016X} has no file \"{}\"! Skipping",
|
||||
static_cast<u64>(font.first), font.second);
|
||||
continue;
|
||||
}
|
||||
std::vector<u32> font_data_u32(font_fp->GetSize() / sizeof(u32));
|
||||
font_fp->ReadBytes<u32>(font_data_u32.data(), font_fp->GetSize());
|
||||
// The data needs to be big-endian u32s for the XOR encryption
|
||||
std::transform(font_data_u32.begin(), font_data_u32.end(), font_data_u32.begin(),
|
||||
Common::swap32);
|
||||
// Font offset and size do not account for the header
|
||||
const FontRegion region{static_cast<u32>(offset + 8),
|
||||
static_cast<u32>((font_data_u32.size() * sizeof(u32)) - 8)};
|
||||
DecryptSharedFont(font_data_u32, *impl->shared_font, offset);
|
||||
impl->shared_font_regions.push_back(region);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include "core/hle/service/service.h"
|
||||
|
||||
namespace Service {
|
||||
@@ -15,6 +16,8 @@ class FileSystemController;
|
||||
|
||||
namespace NS {
|
||||
|
||||
void EncryptSharedFont(const std::vector<u32>& input, std::vector<u8>& output, std::size_t& offset);
|
||||
|
||||
class PL_U final : public ServiceFramework<PL_U> {
|
||||
public:
|
||||
explicit PL_U(Core::System& system);
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
|
||||
#include "core/hle/service/nvdrv/devices/nvmap.h"
|
||||
#include "core/perf_stats.h"
|
||||
@@ -38,7 +39,10 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u3
|
||||
transform, crop_rect};
|
||||
|
||||
system.GetPerfStats().EndGameFrame();
|
||||
system.GetPerfStats().EndSystemFrame();
|
||||
system.GPU().SwapBuffers(&framebuffer);
|
||||
system.FrameLimiter().DoFrameLimiting(system.CoreTiming().GetGlobalTimeUs());
|
||||
system.GetPerfStats().BeginSystemFrame();
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
||||
@@ -63,16 +63,26 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
|
||||
return NvResult::BadParameter;
|
||||
}
|
||||
|
||||
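// The event slot index is packed into the low byte of the value field.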
u32 event_id = params.value & 0x00FF;
|
||||
|
||||
if (event_id >= MaxNvEvents) {
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::BadParameter;
|
||||
}
|
||||
|
||||
auto event = events_interface.events[event_id];
|
||||
auto& gpu = system.GPU();
|
||||
// This mostly accounts for unimplemented features, as a synchronous
|
||||
// GPU is always considered synced.
|
||||
if (!gpu.IsAsync()) {
|
||||
event.writable->Signal();
|
||||
return NvResult::Success;
|
||||
}
|
||||
auto lock = gpu.LockSync();
|
||||
const u32 current_syncpoint_value = gpu.GetSyncpointValue(params.syncpt_id);
|
||||
const s32 diff = current_syncpoint_value - params.threshold;
|
||||
if (diff >= 0) {
|
||||
event.writable->Signal();
|
||||
params.value = current_syncpoint_value;
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
@@ -88,27 +98,6 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
|
||||
return NvResult::Timeout;
|
||||
}
|
||||
|
||||
u32 event_id;
|
||||
if (is_async) {
|
||||
event_id = params.value & 0x00FF;
|
||||
if (event_id >= MaxNvEvents) {
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::BadParameter;
|
||||
}
|
||||
} else {
|
||||
if (ctrl.fresh_call) {
|
||||
const auto result = events_interface.GetFreeEvent();
|
||||
if (result) {
|
||||
event_id = *result;
|
||||
} else {
|
||||
LOG_CRITICAL(Service_NVDRV, "No Free Events available!");
|
||||
event_id = params.value & 0x00FF;
|
||||
}
|
||||
} else {
|
||||
event_id = ctrl.event_id;
|
||||
}
|
||||
}
|
||||
|
||||
EventState status = events_interface.status[event_id];
|
||||
if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) {
|
||||
events_interface.SetEventStatus(event_id, EventState::Waiting);
|
||||
@@ -120,7 +109,7 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
|
||||
params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000;
|
||||
}
|
||||
params.value |= event_id;
|
||||
events_interface.events[event_id].writable->Clear();
|
||||
event.writable->Clear();
|
||||
gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value);
|
||||
if (!is_async && ctrl.fresh_call) {
|
||||
ctrl.must_delay = true;
|
||||
|
||||
@@ -22,6 +22,18 @@ u32 nvhost_nvdec::ioctl(Ioctl command, const std::vector<u8>& input, const std::
|
||||
switch (static_cast<IoctlCommand>(command.raw)) {
|
||||
case IoctlCommand::IocSetNVMAPfdCommand:
|
||||
return SetNVMAPfd(input, output);
|
||||
case IoctlCommand::IocSubmit:
|
||||
return Submit(input, output);
|
||||
case IoctlCommand::IocGetSyncpoint:
|
||||
return GetSyncpoint(input, output);
|
||||
case IoctlCommand::IocGetWaitbase:
|
||||
return GetWaitbase(input, output);
|
||||
case IoctlCommand::IocMapBuffer:
|
||||
return MapBuffer(input, output);
|
||||
case IoctlCommand::IocMapBufferEx:
|
||||
return MapBufferEx(input, output);
|
||||
case IoctlCommand::IocUnmapBufferEx:
|
||||
return UnmapBufferEx(input, output);
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl");
|
||||
@@ -30,11 +42,67 @@ u32 nvhost_nvdec::ioctl(Ioctl command, const std::vector<u8>& input, const std::
|
||||
|
||||
u32 nvhost_nvdec::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetNvmapFD params{};
|
||||
std::memcpy(&params, input.data(), input.size());
std::memcpy(&params, input.data(), sizeof(IoctlSetNvmapFD));
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
|
||||
|
||||
nvmap_fd = params.nvmap_fd;
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_nvdec::Submit(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSubmit params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlSubmit));
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_nvdec::GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetSyncpoint params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
params.value = 0; // Seems to be hard coded at 0
std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_nvdec::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetWaitbase params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
params.value = 0; // Seems to be hard coded at 0
std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_nvdec::MapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlMapBuffer params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
LOG_WARNING(Service_NVDRV, "(STUBBED) called with address={:08X}{:08X}", params.address_2,
params.address_1);
params.address_1 = 0;
params.address_2 = 0;
std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_nvdec::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlMapBufferEx params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlMapBufferEx));
LOG_WARNING(Service_NVDRV, "(STUBBED) called with address={:08X}{:08X}", params.address_2,
params.address_1);
params.address_1 = 0;
params.address_2 = 0;
std::memcpy(output.data(), &params, sizeof(IoctlMapBufferEx));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_nvdec::UnmapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlUnmapBufferEx params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlUnmapBufferEx));
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
std::memcpy(output.data(), &params, sizeof(IoctlUnmapBufferEx));
|
||||
return 0;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
||||
@@ -23,16 +23,66 @@ public:
|
||||
private:
|
||||
enum class IoctlCommand : u32_le {
|
||||
IocSetNVMAPfdCommand = 0x40044801,
|
||||
IocSubmit = 0xC0400001,
|
||||
IocGetSyncpoint = 0xC0080002,
|
||||
IocGetWaitbase = 0xC0080003,
|
||||
IocMapBuffer = 0xC01C0009,
|
||||
IocMapBufferEx = 0xC0A40009,
|
||||
IocUnmapBufferEx = 0xC0A4000A,
|
||||
};
|
||||
|
||||
struct IoctlSetNvmapFD {
|
||||
u32_le nvmap_fd;
|
||||
};
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 4, "IoctlSetNvmapFD is incorrect size");
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 0x4, "IoctlSetNvmapFD is incorrect size");
|
||||
|
||||
struct IoctlSubmit {
|
||||
INSERT_PADDING_BYTES(0x40); // TODO(DarkLordZach): RE this structure
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmit) == 0x40, "IoctlSubmit has incorrect size");
|
||||
|
||||
struct IoctlGetSyncpoint {
|
||||
u32 unknown; // seems to be ignored? Nintendo added this
|
||||
u32 value;
|
||||
};
|
||||
static_assert(sizeof(IoctlGetSyncpoint) == 0x08, "IoctlGetSyncpoint has incorrect size");
|
||||
|
||||
struct IoctlGetWaitbase {
|
||||
u32 unknown; // seems to be ignored? Nintendo added this
|
||||
u32 value;
|
||||
};
|
||||
static_assert(sizeof(IoctlGetWaitbase) == 0x08, "IoctlGetWaitbase has incorrect size");
|
||||
|
||||
struct IoctlMapBuffer {
|
||||
u32 unknown;
|
||||
u32 address_1;
|
||||
u32 address_2;
|
||||
INSERT_PADDING_BYTES(0x10); // TODO(DarkLordZach): RE this structure
|
||||
};
|
||||
static_assert(sizeof(IoctlMapBuffer) == 0x1C, "IoctlMapBuffer is incorrect size");
|
||||
|
||||
struct IoctlMapBufferEx {
|
||||
u32 unknown;
|
||||
u32 address_1;
|
||||
u32 address_2;
|
||||
INSERT_PADDING_BYTES(0x98); // TODO(DarkLordZach): RE this structure
|
||||
};
|
||||
static_assert(sizeof(IoctlMapBufferEx) == 0xA4, "IoctlMapBufferEx has incorrect size");
|
||||
|
||||
struct IoctlUnmapBufferEx {
|
||||
INSERT_PADDING_BYTES(0xA4); // TODO(DarkLordZach): RE this structure
|
||||
};
|
||||
static_assert(sizeof(IoctlUnmapBufferEx) == 0xA4, "IoctlUnmapBufferEx has incorrect size");
|
||||
|
||||
u32_le nvmap_fd{};
|
||||
|
||||
u32 SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 Submit(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 MapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 UnmapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
||||
@@ -22,6 +22,18 @@ u32 nvhost_vic::ioctl(Ioctl command, const std::vector<u8>& input, const std::ve
|
||||
switch (static_cast<IoctlCommand>(command.raw)) {
|
||||
case IoctlCommand::IocSetNVMAPfdCommand:
|
||||
return SetNVMAPfd(input, output);
|
||||
case IoctlCommand::IocSubmit:
|
||||
return Submit(input, output);
|
||||
case IoctlCommand::IocGetSyncpoint:
|
||||
return GetSyncpoint(input, output);
|
||||
case IoctlCommand::IocGetWaitbase:
|
||||
return GetWaitbase(input, output);
|
||||
case IoctlCommand::IocMapBuffer:
|
||||
return MapBuffer(input, output);
|
||||
case IoctlCommand::IocMapBufferEx:
|
||||
return MapBuffer(input, output);
|
||||
case IoctlCommand::IocUnmapBufferEx:
|
||||
return UnmapBufferEx(input, output);
|
||||
}
|
||||
|
||||
UNIMPLEMENTED_MSG("Unimplemented ioctl");
|
||||
@@ -30,11 +42,67 @@ u32 nvhost_vic::ioctl(Ioctl command, const std::vector<u8>& input, const std::ve
|
||||
|
||||
u32 nvhost_vic::SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSetNvmapFD params{};
|
||||
std::memcpy(&params, input.data(), input.size());
std::memcpy(&params, input.data(), sizeof(IoctlSetNvmapFD));
|
||||
LOG_DEBUG(Service_NVDRV, "called, fd={}", params.nvmap_fd);
|
||||
|
||||
nvmap_fd = params.nvmap_fd;
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_vic::Submit(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlSubmit params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlSubmit));
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_vic::GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetSyncpoint params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
params.value = 0; // Seems to be hard coded at 0
std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_vic::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlGetWaitbase params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
LOG_INFO(Service_NVDRV, "called, unknown=0x{:X}", params.unknown);
params.value = 0; // Seems to be hard coded at 0
std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_vic::MapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlMapBuffer params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
LOG_WARNING(Service_NVDRV, "(STUBBED) called with address={:08X}{:08X}", params.address_2,
params.address_1);
params.address_1 = 0;
params.address_2 = 0;
std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_vic::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlMapBufferEx params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlMapBufferEx));
LOG_WARNING(Service_NVDRV, "(STUBBED) called with address={:08X}{:08X}", params.address_2,
params.address_1);
params.address_1 = 0;
params.address_2 = 0;
std::memcpy(output.data(), &params, sizeof(IoctlMapBufferEx));
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 nvhost_vic::UnmapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
|
||||
IoctlUnmapBufferEx params{};
|
||||
std::memcpy(&params, input.data(), sizeof(IoctlUnmapBufferEx));
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
std::memcpy(output.data(), &params, sizeof(IoctlUnmapBufferEx));
|
||||
return 0;
|
||||
}
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
||||
@@ -23,6 +23,12 @@ public:
|
||||
private:
|
||||
enum class IoctlCommand : u32_le {
|
||||
IocSetNVMAPfdCommand = 0x40044801,
|
||||
IocSubmit = 0xC0400001,
|
||||
IocGetSyncpoint = 0xC0080002,
|
||||
IocGetWaitbase = 0xC0080003,
|
||||
IocMapBuffer = 0xC01C0009,
|
||||
IocMapBufferEx = 0xC03C0009,
|
||||
IocUnmapBufferEx = 0xC03C000A,
|
||||
};
|
||||
|
||||
struct IoctlSetNvmapFD {
|
||||
@@ -30,9 +36,53 @@ private:
|
||||
};
|
||||
static_assert(sizeof(IoctlSetNvmapFD) == 4, "IoctlSetNvmapFD is incorrect size");
|
||||
|
||||
struct IoctlSubmit {
|
||||
INSERT_PADDING_BYTES(0x40); // TODO(DarkLordZach): RE this structure
|
||||
};
|
||||
static_assert(sizeof(IoctlSubmit) == 0x40, "IoctlSubmit is incorrect size");
|
||||
|
||||
struct IoctlGetSyncpoint {
|
||||
u32 unknown; // seems to be ignored? Nintendo added this
|
||||
u32 value;
|
||||
};
|
||||
static_assert(sizeof(IoctlGetSyncpoint) == 0x8, "IoctlGetSyncpoint is incorrect size");
|
||||
|
||||
struct IoctlGetWaitbase {
|
||||
u32 unknown; // seems to be ignored? Nintendo added this
|
||||
u32 value;
|
||||
};
|
||||
static_assert(sizeof(IoctlGetWaitbase) == 0x8, "IoctlGetWaitbase is incorrect size");
|
||||
|
||||
struct IoctlMapBuffer {
|
||||
u32 unknown;
|
||||
u32 address_1;
|
||||
u32 address_2;
|
||||
INSERT_PADDING_BYTES(0x10); // TODO(DarkLordZach): RE this structure
|
||||
};
|
||||
static_assert(sizeof(IoctlMapBuffer) == 0x1C, "IoctlMapBuffer is incorrect size");
|
||||
|
||||
struct IoctlMapBufferEx {
|
||||
u32 unknown;
|
||||
u32 address_1;
|
||||
u32 address_2;
|
||||
INSERT_PADDING_BYTES(0x30); // TODO(DarkLordZach): RE this structure
|
||||
};
|
||||
static_assert(sizeof(IoctlMapBufferEx) == 0x3C, "IoctlMapBufferEx is incorrect size");
|
||||
|
||||
struct IoctlUnmapBufferEx {
|
||||
INSERT_PADDING_BYTES(0x3C); // TODO(DarkLordZach): RE this structure
|
||||
};
|
||||
static_assert(sizeof(IoctlUnmapBufferEx) == 0x3C, "IoctlUnmapBufferEx is incorrect size");
|
||||
|
||||
u32_le nvmap_fd{};
|
||||
|
||||
u32 SetNVMAPfd(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 Submit(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 GetSyncpoint(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 MapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
u32 UnmapBufferEx(const std::vector<u8>& input, std::vector<u8>& output);
|
||||
};
|
||||
|
||||
} // namespace Service::Nvidia::Devices
|
||||
|
||||
@@ -134,7 +134,9 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
|
||||
IPC::ResponseBuilder rb{ctx, 3, 1};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
if (event_id < MaxNvEvents) {
|
||||
rb.PushCopyObjects(nvdrv->GetEvent(event_id));
|
||||
auto event = nvdrv->GetEvent(event_id);
|
||||
event->Clear();
|
||||
rb.PushCopyObjects(event);
|
||||
rb.Push<u32>(NvResult::Success);
|
||||
} else {
|
||||
rb.Push<u32>(0);
|
||||
|
||||
@@ -40,8 +40,8 @@ Module::Module(Core::System& system) {
|
||||
auto& kernel = system.Kernel();
|
||||
for (u32 i = 0; i < MaxNvEvents; i++) {
|
||||
std::string event_label = fmt::format("NVDRV::NvEvent_{}", i);
|
||||
events_interface.events[i] = Kernel::WritableEvent::CreateEventPair(
|
||||
kernel, Kernel::ResetType::Automatic, event_label);
|
||||
events_interface.events[i] =
|
||||
Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual, event_label);
|
||||
events_interface.status[i] = EventState::Free;
|
||||
events_interface.registered[i] = false;
|
||||
}
|
||||
|
||||
@@ -187,14 +187,18 @@ void NVFlinger::Compose() {
|
||||
MicroProfileFlip();
|
||||
|
||||
if (!buffer) {
|
||||
// There was no queued buffer to draw, render previous frame
|
||||
system.GetPerfStats().EndGameFrame();
|
||||
system.GPU().SwapBuffers({});
|
||||
continue;
|
||||
}
|
||||
|
||||
const auto& igbp_buffer = buffer->get().igbp_buffer;
|
||||
|
||||
const auto& gpu = system.GPU();
|
||||
const auto& multi_fence = buffer->get().multi_fence;
|
||||
for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
|
||||
const auto& fence = multi_fence.fences[fence_id];
|
||||
gpu.WaitFence(fence.id, fence.value);
|
||||
}
|
||||
|
||||
// Now send the buffer to the GPU for drawing.
|
||||
// TODO(Subv): Support more than just disp0. The display device selection is probably based
|
||||
// on which display we're drawing (Default, Internal, External, etc)
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
#include "core/core.h"
|
||||
#include "core/gdbstub/gdbstub.h"
|
||||
#include "core/hle/service/hid/hid.h"
|
||||
#include "core/hle/service/lbl/lbl.h"
|
||||
#include "core/hle/service/sm/sm.h"
|
||||
#include "core/settings.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
@@ -70,6 +72,7 @@ void Apply() {
|
||||
auto& system_instance = Core::System::GetInstance();
|
||||
if (system_instance.IsPoweredOn()) {
|
||||
system_instance.Renderer().RefreshBaseSettings();
|
||||
Service::LBL::RequestLoadCurrentSetting(system_instance.ServiceManager());
|
||||
}
|
||||
|
||||
Service::HID::ReloadInputDevices();
|
||||
|
||||
@@ -428,6 +428,8 @@ struct Values {
|
||||
float bg_green;
|
||||
float bg_blue;
|
||||
|
||||
float backlight_brightness = 0.5f;
|
||||
|
||||
std::string log_filter;
|
||||
|
||||
bool use_dev_keys;
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
|
||||
#include <array>
|
||||
#include <bitset>
|
||||
#include <cstdlib>
|
||||
#include <string>
|
||||
#include "common/file_util.h"
|
||||
#include "core/core.h"
|
||||
@@ -13,7 +14,7 @@
|
||||
|
||||
// Numbers are chosen randomly to make sure the correct one is given.
|
||||
static constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
|
||||
static constexpr int MAX_SLICE_LENGTH = 20000; // Copied from CoreTiming internals
|
||||
static constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals
|
||||
|
||||
static std::bitset<CB_IDS.size()> callbacks_ran_flags;
|
||||
static u64 expected_callback = 0;
|
||||
@@ -28,6 +29,12 @@ void CallbackTemplate(u64 userdata, s64 cycles_late) {
|
||||
REQUIRE(lateness == cycles_late);
|
||||
}
|
||||
|
||||
static u64 callbacks_done = 0;
|
||||
|
||||
void EmptyCallback(u64 userdata, s64 cycles_late) {
|
||||
++callbacks_done;
|
||||
}
|
||||
|
||||
struct ScopeInit final {
|
||||
ScopeInit() {
|
||||
core_timing.Initialize();
|
||||
@@ -39,18 +46,19 @@ struct ScopeInit final {
|
||||
Core::Timing::CoreTiming core_timing;
|
||||
};
|
||||
|
||||
static void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, int downcount,
|
||||
static void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32 context = 0,
|
||||
int expected_lateness = 0, int cpu_downcount = 0) {
|
||||
callbacks_ran_flags = 0;
|
||||
expected_callback = CB_IDS[idx];
|
||||
lateness = expected_lateness;
|
||||
|
||||
// Pretend we executed X cycles of instructions.
|
||||
core_timing.SwitchContext(context);
|
||||
core_timing.AddTicks(core_timing.GetDowncount() - cpu_downcount);
|
||||
core_timing.Advance();
|
||||
core_timing.SwitchContext((context + 1) % 4);
|
||||
|
||||
REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags);
|
||||
REQUIRE(downcount == core_timing.GetDowncount());
|
||||
}
|
||||
|
||||
TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
|
||||
@@ -64,9 +72,10 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
|
||||
Core::Timing::EventType* cb_e = core_timing.RegisterEvent("callbackE", CallbackTemplate<4>);
|
||||
|
||||
// Enter slice 0
|
||||
core_timing.Advance();
|
||||
core_timing.ResetRun();
|
||||
|
||||
// D -> B -> C -> A -> E
|
||||
core_timing.SwitchContext(0);
|
||||
core_timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
|
||||
REQUIRE(1000 == core_timing.GetDowncount());
|
||||
core_timing.ScheduleEvent(500, cb_b, CB_IDS[1]);
|
||||
@@ -78,98 +87,46 @@ TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
|
||||
core_timing.ScheduleEvent(1200, cb_e, CB_IDS[4]);
|
||||
REQUIRE(100 == core_timing.GetDowncount());
|
||||
|
||||
AdvanceAndCheck(core_timing, 3, 400);
|
||||
AdvanceAndCheck(core_timing, 1, 300);
|
||||
AdvanceAndCheck(core_timing, 2, 200);
|
||||
AdvanceAndCheck(core_timing, 0, 200);
|
||||
AdvanceAndCheck(core_timing, 4, MAX_SLICE_LENGTH);
|
||||
AdvanceAndCheck(core_timing, 3, 0);
|
||||
AdvanceAndCheck(core_timing, 1, 1);
|
||||
AdvanceAndCheck(core_timing, 2, 2);
|
||||
AdvanceAndCheck(core_timing, 0, 3);
|
||||
AdvanceAndCheck(core_timing, 4, 0);
|
||||
}
|
||||
|
||||
TEST_CASE("CoreTiming[Threadsave]", "[core]") {
|
||||
ScopeInit guard;
|
||||
auto& core_timing = guard.core_timing;
|
||||
|
||||
Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", CallbackTemplate<0>);
|
||||
Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>);
|
||||
Core::Timing::EventType* cb_c = core_timing.RegisterEvent("callbackC", CallbackTemplate<2>);
|
||||
Core::Timing::EventType* cb_d = core_timing.RegisterEvent("callbackD", CallbackTemplate<3>);
|
||||
Core::Timing::EventType* cb_e = core_timing.RegisterEvent("callbackE", CallbackTemplate<4>);
|
||||
|
||||
// Enter slice 0
|
||||
core_timing.Advance();
|
||||
|
||||
// D -> B -> C -> A -> E
|
||||
core_timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
|
||||
// Manually force since ScheduleEvent doesn't call it
|
||||
core_timing.ForceExceptionCheck(1000);
|
||||
REQUIRE(1000 == core_timing.GetDowncount());
|
||||
core_timing.ScheduleEvent(500, cb_b, CB_IDS[1]);
|
||||
// Manually force since ScheduleEvent doesn't call it
|
||||
core_timing.ForceExceptionCheck(500);
|
||||
REQUIRE(500 == core_timing.GetDowncount());
|
||||
core_timing.ScheduleEvent(800, cb_c, CB_IDS[2]);
|
||||
// Manually force since ScheduleEvent doesn't call it
|
||||
core_timing.ForceExceptionCheck(800);
|
||||
REQUIRE(500 == core_timing.GetDowncount());
|
||||
core_timing.ScheduleEvent(100, cb_d, CB_IDS[3]);
|
||||
// Manually force since ScheduleEvent doesn't call it
|
||||
core_timing.ForceExceptionCheck(100);
|
||||
REQUIRE(100 == core_timing.GetDowncount());
|
||||
core_timing.ScheduleEvent(1200, cb_e, CB_IDS[4]);
|
||||
// Manually force since ScheduleEvent doesn't call it
|
||||
core_timing.ForceExceptionCheck(1200);
|
||||
REQUIRE(100 == core_timing.GetDowncount());
|
||||
|
||||
AdvanceAndCheck(core_timing, 3, 400);
|
||||
AdvanceAndCheck(core_timing, 1, 300);
|
||||
AdvanceAndCheck(core_timing, 2, 200);
|
||||
AdvanceAndCheck(core_timing, 0, 200);
|
||||
AdvanceAndCheck(core_timing, 4, MAX_SLICE_LENGTH);
|
||||
}
|
||||
|
||||
namespace SharedSlotTest {
|
||||
static unsigned int counter = 0;
|
||||
|
||||
template <unsigned int ID>
|
||||
void FifoCallback(u64 userdata, s64 cycles_late) {
|
||||
static_assert(ID < CB_IDS.size(), "ID out of range");
|
||||
callbacks_ran_flags.set(ID);
|
||||
REQUIRE(CB_IDS[ID] == userdata);
|
||||
REQUIRE(ID == counter);
|
||||
REQUIRE(lateness == cycles_late);
|
||||
++counter;
|
||||
}
|
||||
} // namespace SharedSlotTest
|
||||
|
||||
TEST_CASE("CoreTiming[SharedSlot]", "[core]") {
|
||||
using namespace SharedSlotTest;
|
||||
TEST_CASE("CoreTiming[FairSharing]", "[core]") {
|
||||
|
||||
ScopeInit guard;
|
||||
auto& core_timing = guard.core_timing;
|
||||
|
||||
Core::Timing::EventType* cb_a = core_timing.RegisterEvent("callbackA", FifoCallback<0>);
|
||||
Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", FifoCallback<1>);
|
||||
Core::Timing::EventType* cb_c = core_timing.RegisterEvent("callbackC", FifoCallback<2>);
|
||||
Core::Timing::EventType* cb_d = core_timing.RegisterEvent("callbackD", FifoCallback<3>);
|
||||
Core::Timing::EventType* cb_e = core_timing.RegisterEvent("callbackE", FifoCallback<4>);
|
||||
Core::Timing::EventType* empty_callback =
|
||||
core_timing.RegisterEvent("empty_callback", EmptyCallback);
|
||||
|
||||
core_timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
|
||||
core_timing.ScheduleEvent(1000, cb_b, CB_IDS[1]);
|
||||
core_timing.ScheduleEvent(1000, cb_c, CB_IDS[2]);
|
||||
core_timing.ScheduleEvent(1000, cb_d, CB_IDS[3]);
|
||||
core_timing.ScheduleEvent(1000, cb_e, CB_IDS[4]);
|
||||
callbacks_done = 0;
|
||||
u64 MAX_CALLBACKS = 10;
|
||||
for (std::size_t i = 0; i < 10; i++) {
|
||||
core_timing.ScheduleEvent(i * 3333U, empty_callback, 0);
|
||||
}
|
||||
|
||||
// Enter slice 0
|
||||
core_timing.Advance();
|
||||
REQUIRE(1000 == core_timing.GetDowncount());
|
||||
const s64 advances = MAX_SLICE_LENGTH / 10;
|
||||
core_timing.ResetRun();
|
||||
u64 current_time = core_timing.GetTicks();
|
||||
bool keep_running{};
|
||||
do {
|
||||
keep_running = false;
|
||||
for (u32 active_core = 0; active_core < 4; ++active_core) {
|
||||
core_timing.SwitchContext(active_core);
|
||||
if (core_timing.CanCurrentContextRun()) {
|
||||
core_timing.AddTicks(std::min<s64>(advances, core_timing.GetDowncount()));
|
||||
core_timing.Advance();
|
||||
}
|
||||
keep_running |= core_timing.CanCurrentContextRun();
|
||||
}
|
||||
} while (keep_running);
|
||||
u64 current_time_2 = core_timing.GetTicks();
|
||||
|
||||
callbacks_ran_flags = 0;
|
||||
counter = 0;
|
||||
lateness = 0;
|
||||
core_timing.AddTicks(core_timing.GetDowncount());
|
||||
core_timing.Advance();
|
||||
REQUIRE(MAX_SLICE_LENGTH == core_timing.GetDowncount());
|
||||
REQUIRE(0x1FULL == callbacks_ran_flags.to_ullong());
|
||||
REQUIRE(MAX_CALLBACKS == callbacks_done);
|
||||
REQUIRE(current_time_2 == current_time + MAX_SLICE_LENGTH * 4);
|
||||
}
|
||||
|
||||
TEST_CASE("Core::Timing[PredictableLateness]", "[core]") {
|
||||
@@ -180,13 +137,13 @@ TEST_CASE("Core::Timing[PredictableLateness]", "[core]") {
|
||||
Core::Timing::EventType* cb_b = core_timing.RegisterEvent("callbackB", CallbackTemplate<1>);
|
||||
|
||||
// Enter slice 0
|
||||
core_timing.Advance();
|
||||
core_timing.ResetRun();
|
||||
|
||||
core_timing.ScheduleEvent(100, cb_a, CB_IDS[0]);
|
||||
core_timing.ScheduleEvent(200, cb_b, CB_IDS[1]);
|
||||
|
||||
AdvanceAndCheck(core_timing, 0, 90, 10, -10); // (100 - 10)
|
||||
AdvanceAndCheck(core_timing, 1, MAX_SLICE_LENGTH, 50, -50);
|
||||
AdvanceAndCheck(core_timing, 0, 0, 10, -10); // (100 - 10)
|
||||
AdvanceAndCheck(core_timing, 1, 1, 50, -50);
|
||||
}
|
||||
|
||||
namespace ChainSchedulingTest {
|
||||
@@ -220,7 +177,7 @@ TEST_CASE("CoreTiming[ChainScheduling]", "[core]") {
|
||||
});
|
||||
|
||||
// Enter slice 0
|
||||
core_timing.Advance();
|
||||
core_timing.ResetRun();
|
||||
|
||||
core_timing.ScheduleEvent(800, cb_a, CB_IDS[0]);
|
||||
core_timing.ScheduleEvent(1000, cb_b, CB_IDS[1]);
|
||||
@@ -229,19 +186,19 @@ TEST_CASE("CoreTiming[ChainScheduling]", "[core]") {
|
||||
REQUIRE(800 == core_timing.GetDowncount());
|
||||
|
||||
reschedules = 3;
|
||||
AdvanceAndCheck(core_timing, 0, 200); // cb_a
|
||||
AdvanceAndCheck(core_timing, 1, 1000); // cb_b, cb_rs
|
||||
AdvanceAndCheck(core_timing, 0, 0); // cb_a
|
||||
AdvanceAndCheck(core_timing, 1, 1); // cb_b, cb_rs
|
||||
REQUIRE(2 == reschedules);
|
||||
|
||||
core_timing.AddTicks(core_timing.GetDowncount());
|
||||
core_timing.Advance(); // cb_rs
|
||||
core_timing.SwitchContext(3);
|
||||
REQUIRE(1 == reschedules);
|
||||
REQUIRE(200 == core_timing.GetDowncount());
|
||||
|
||||
AdvanceAndCheck(core_timing, 2, 800); // cb_c
|
||||
AdvanceAndCheck(core_timing, 2, 3); // cb_c
|
||||
|
||||
core_timing.AddTicks(core_timing.GetDowncount());
|
||||
core_timing.Advance(); // cb_rs
|
||||
REQUIRE(0 == reschedules);
|
||||
REQUIRE(MAX_SLICE_LENGTH == core_timing.GetDowncount());
|
||||
}
|
||||
|
||||
@@ -47,10 +47,20 @@ void Fermi2D::HandleSurfaceCopy() {
|
||||
src_blit_x2 = static_cast<u32>((regs.blit_src_x >> 32) + regs.blit_dst_width);
|
||||
src_blit_y2 = static_cast<u32>((regs.blit_src_y >> 32) + regs.blit_dst_height);
|
||||
}
|
||||
const Common::Rectangle<u32> src_rect{src_blit_x1, src_blit_y1, src_blit_x2, src_blit_y2};
|
||||
const Common::Rectangle<u32> dst_rect{regs.blit_dst_x, regs.blit_dst_y,
|
||||
regs.blit_dst_x + regs.blit_dst_width,
|
||||
regs.blit_dst_y + regs.blit_dst_height};
|
||||
const u32 dst_blit_x2 = regs.blit_dst_x + regs.blit_dst_width;
|
||||
const u32 dst_blit_y2 = regs.blit_dst_y + regs.blit_dst_height;
|
||||
const u32 excess_src_x2 = std::max<s32>(0, dst_blit_x2 - regs.dst.width);
|
||||
const u32 excess_src_y2 = std::max<s32>(0, dst_blit_y2 - regs.dst.height);
|
||||
const u32 excess_dst_x2 = std::max<s32>(0, src_blit_x2 - regs.src.width);
|
||||
const u32 excess_dst_y2 = std::max<s32>(0, src_blit_y2 - regs.src.height);
|
||||
|
||||
const Common::Rectangle<u32> src_rect{
|
||||
src_blit_x1, src_blit_y1, std::min<u32>(regs.src.width, src_blit_x2) - excess_src_x2,
|
||||
std::min<u32>(regs.src.height, src_blit_y2) - excess_src_y2};
|
||||
const Common::Rectangle<u32> dst_rect{
|
||||
regs.blit_dst_x, regs.blit_dst_y,
|
||||
std::min<u32>(regs.dst.width, dst_blit_x2) - excess_dst_x2,
|
||||
std::min<u32>(regs.dst.height, dst_blit_y2) - excess_dst_y2};
|
||||
Config copy_config;
|
||||
copy_config.operation = regs.operation;
|
||||
copy_config.filter = regs.blit_control.filter;
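The clamping introduced here cross-couples the two rectangles: if the destination rectangle overshoots its surface, the source rectangle is shrunk by the same amount, and vice versa. A worked example with made-up numbers (not taken from the change), horizontal axis only:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical surface widths and requested blit right edges.
        const std::uint32_t src_width = 128, dst_width = 64;
        const std::uint32_t src_x2 = 120; // requested source right edge
        const std::uint32_t dst_x2 = 70;  // requested destination right edge

        // Destination overshoots its 64-wide surface by 6, so the source loses 6 as well.
        const std::uint32_t excess_src_x2 = std::max<std::int32_t>(0, dst_x2 - dst_width); // 6
        // Source stays inside its surface, so the destination loses nothing.
        const std::uint32_t excess_dst_x2 = std::max<std::int32_t>(0, src_x2 - src_width); // 0

        const std::uint32_t src_right = std::min<std::uint32_t>(src_width, src_x2) - excess_src_x2; // 114
        const std::uint32_t dst_right = std::min<std::uint32_t>(dst_width, dst_x2) - excess_dst_x2; // 64

        std::printf("src right = %u, dst right = %u\n", src_right, dst_right);
        return 0;
    }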
@@ -3,6 +3,7 @@
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/memory.h"
|
||||
@@ -17,6 +18,8 @@
|
||||
|
||||
namespace Tegra {
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
|
||||
|
||||
GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async)
|
||||
: system{system}, renderer{renderer}, is_async{is_async} {
|
||||
auto& rasterizer{renderer.Rasterizer()};
|
||||
@@ -63,6 +66,16 @@ const DmaPusher& GPU::DmaPusher() const {
|
||||
return *dma_pusher;
|
||||
}
|
||||
|
||||
void GPU::WaitFence(u32 syncpoint_id, u32 value) const {
|
||||
// A synced (non-async) GPU is always in sync
|
||||
if (!is_async) {
|
||||
return;
|
||||
}
|
||||
MICROPROFILE_SCOPE(GPU_wait);
|
||||
while (syncpoints[syncpoint_id].load(std::memory_order_relaxed) < value) {
|
||||
}
|
||||
}
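WaitFence pairs with IncrementSyncPoint below: the CPU side spins until the counter catches up with the requested value, while the GPU side bumps it as work completes. A minimal self-contained sketch of that producer/consumer pattern, using standalone types rather than yuzu's actual classes:

    #include <atomic>
    #include <cstdint>

    // Simplified syncpoint: the producer (GPU) increments the counter, the
    // consumer (CPU) busy-waits until it reaches the target value.
    struct Syncpoint {
        std::atomic<std::uint32_t> value{0};

        void Increment() {
            value.fetch_add(1, std::memory_order_release);
        }

        void Wait(std::uint32_t target) const {
            while (value.load(std::memory_order_acquire) < target) {
                // Spin; the real implementation wraps this wait in a profiling scope.
            }
        }
    };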
void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
|
||||
syncpoints[syncpoint_id]++;
|
||||
std::lock_guard lock{sync_mutex};
|
||||
|
||||
@@ -177,6 +177,12 @@ public:
|
||||
/// Returns a reference to the GPU DMA pusher.
|
||||
Tegra::DmaPusher& DmaPusher();
|
||||
|
||||
// Waits for the GPU to finish working
|
||||
virtual void WaitIdle() const = 0;
|
||||
|
||||
/// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
|
||||
void WaitFence(u32 syncpoint_id, u32 value) const;
|
||||
|
||||
void IncrementSyncPoint(u32 syncpoint_id);
|
||||
|
||||
u32 GetSyncpointValue(u32 syncpoint_id) const;
|
||||
|
||||
@@ -44,4 +44,8 @@ void GPUAsynch::TriggerCpuInterrupt(const u32 syncpoint_id, const u32 value) con
|
||||
interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
|
||||
}
|
||||
|
||||
void GPUAsynch::WaitIdle() const {
|
||||
gpu_thread.WaitIdle();
|
||||
}
|
||||
|
||||
} // namespace VideoCommon
|
||||
|
||||
@@ -25,6 +25,7 @@ public:
|
||||
void FlushRegion(CacheAddr addr, u64 size) override;
|
||||
void InvalidateRegion(CacheAddr addr, u64 size) override;
|
||||
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
|
||||
void WaitIdle() const override;
|
||||
|
||||
protected:
|
||||
void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override;
|
||||
|
||||
@@ -24,6 +24,7 @@ public:
|
||||
void FlushRegion(CacheAddr addr, u64 size) override;
|
||||
void InvalidateRegion(CacheAddr addr, u64 size) override;
|
||||
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
|
||||
void WaitIdle() const override {}
|
||||
|
||||
protected:
|
||||
void TriggerCpuInterrupt([[maybe_unused]] u32 syncpoint_id,
|
||||
|
||||
@@ -5,8 +5,6 @@
|
||||
#include "common/assert.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "core/core.h"
|
||||
#include "core/core_timing.h"
|
||||
#include "core/core_timing_util.h"
|
||||
#include "core/frontend/scope_acquire_window_context.h"
|
||||
#include "video_core/dma_pusher.h"
|
||||
#include "video_core/gpu.h"
|
||||
@@ -68,14 +66,10 @@ ThreadManager::~ThreadManager() {
|
||||
|
||||
void ThreadManager::StartThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) {
|
||||
thread = std::thread{RunThread, std::ref(renderer), std::ref(dma_pusher), std::ref(state)};
|
||||
synchronization_event = system.CoreTiming().RegisterEvent(
|
||||
"GPUThreadSynch", [this](u64 fence, s64) { state.WaitForSynchronization(fence); });
|
||||
}
|
||||
|
||||
void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
|
||||
const u64 fence{PushCommand(SubmitListCommand(std::move(entries)))};
|
||||
const s64 synchronization_ticks{Core::Timing::usToCycles(std::chrono::microseconds{9000})};
|
||||
system.CoreTiming().ScheduleEvent(synchronization_ticks, synchronization_event, fence);
|
||||
PushCommand(SubmitListCommand(std::move(entries)));
|
||||
}
|
||||
|
||||
void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
|
||||
@@ -96,16 +90,15 @@ void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
|
||||
InvalidateRegion(addr, size);
|
||||
}
|
||||
|
||||
void ThreadManager::WaitIdle() const {
|
||||
while (state.last_fence > state.signaled_fence.load(std::memory_order_relaxed)) {
|
||||
}
|
||||
}
|
||||
|
||||
u64 ThreadManager::PushCommand(CommandData&& command_data) {
|
||||
const u64 fence{++state.last_fence};
|
||||
state.queue.Push(CommandDataContainer(std::move(command_data), fence));
|
||||
return fence;
|
||||
}
|
||||
|
||||
MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
|
||||
void SynchState::WaitForSynchronization(u64 fence) {
|
||||
while (signaled_fence.load() < fence)
|
||||
;
|
||||
}
|
||||
|
||||
} // namespace VideoCommon::GPUThread
|
||||
|
||||
@@ -21,9 +21,6 @@ class DmaPusher;
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
namespace Timing {
|
||||
struct EventType;
|
||||
} // namespace Timing
|
||||
} // namespace Core
|
||||
|
||||
namespace VideoCommon::GPUThread {
|
||||
@@ -89,8 +86,6 @@ struct CommandDataContainer {
|
||||
struct SynchState final {
|
||||
std::atomic_bool is_running{true};
|
||||
|
||||
void WaitForSynchronization(u64 fence);
|
||||
|
||||
using CommandQueue = Common::SPSCQueue<CommandDataContainer>;
|
||||
CommandQueue queue;
|
||||
u64 last_fence{};
|
||||
@@ -121,6 +116,9 @@ public:
|
||||
/// Notify rasterizer that any caches of the specified region should be flushed and invalidated
|
||||
void FlushAndInvalidateRegion(CacheAddr addr, u64 size);
|
||||
|
||||
// Wait until the gpu thread is idle.
|
||||
void WaitIdle() const;
|
||||
|
||||
private:
|
||||
/// Pushes a command to be executed by the GPU thread
|
||||
u64 PushCommand(CommandData&& command_data);
|
||||
@@ -128,7 +126,6 @@ private:
|
||||
private:
|
||||
SynchState state;
|
||||
Core::System& system;
|
||||
Core::Timing::EventType* synchronization_event{};
|
||||
std::thread thread;
|
||||
std::thread::id thread_id;
|
||||
};
|
||||
|
||||
@@ -93,6 +93,7 @@ static constexpr ConversionArray morton_to_linear_fns = {
|
||||
MortonCopy<true, PixelFormat::DXT23_SRGB>,
|
||||
MortonCopy<true, PixelFormat::DXT45_SRGB>,
|
||||
MortonCopy<true, PixelFormat::BC7U_SRGB>,
|
||||
MortonCopy<true, PixelFormat::R4G4B4A4U>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>,
|
||||
@@ -101,6 +102,16 @@ static constexpr ConversionArray morton_to_linear_fns = {
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_10X8>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_6X6>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_6X6_SRGB>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_10X10>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_10X10_SRGB>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_12X12>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_12X12_SRGB>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_8X6>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_8X6_SRGB>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_6X5>,
|
||||
MortonCopy<true, PixelFormat::ASTC_2D_6X5_SRGB>,
|
||||
MortonCopy<true, PixelFormat::Z32F>,
|
||||
MortonCopy<true, PixelFormat::Z16>,
|
||||
MortonCopy<true, PixelFormat::Z24S8>,
|
||||
@@ -162,6 +173,17 @@ static constexpr ConversionArray linear_to_morton_fns = {
|
||||
MortonCopy<false, PixelFormat::DXT23_SRGB>,
|
||||
MortonCopy<false, PixelFormat::DXT45_SRGB>,
|
||||
MortonCopy<false, PixelFormat::BC7U_SRGB>,
|
||||
MortonCopy<false, PixelFormat::R4G4B4A4U>,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
nullptr,
|
||||
|
||||
@@ -40,4 +40,35 @@ void RendererBase::RequestScreenshot(void* data, std::function<void()> callback,
|
||||
renderer_settings.screenshot_requested = true;
|
||||
}
|
||||
|
||||
f32 RendererBase::GetCurrentResultantBrightness() const {
|
||||
return renderer_settings.current_brightness / 2.0f;
|
||||
}
|
||||
|
||||
void RendererBase::SetBacklightStatus(bool enabled, u64 fade_transition_time) {
|
||||
if (fade_transition_time == 0) {
|
||||
// Needed to ensure the renderer recognizes that a change must occur.
|
||||
fade_transition_time = 1;
|
||||
}
|
||||
|
||||
if (enabled && renderer_settings.current_brightness == 0) {
|
||||
renderer_settings.current_brightness = current_brightness_backup;
|
||||
renderer_settings.backlight_fade_time = fade_transition_time;
|
||||
} else if (!enabled && renderer_settings.current_brightness != 0) {
|
||||
current_brightness_backup = renderer_settings.current_brightness;
|
||||
renderer_settings.current_brightness = 0;
|
||||
renderer_settings.backlight_fade_time = fade_transition_time;
|
||||
}
|
||||
}
|
||||
|
||||
bool RendererBase::GetBacklightStatus() const {
|
||||
return renderer_settings.current_brightness != 0;
|
||||
}
|
||||
|
||||
void RendererBase::SetCurrentBrightness(f32 value) {
|
||||
if (value != renderer_settings.current_brightness) {
|
||||
renderer_settings.current_brightness = value * 2.0f;
|
||||
renderer_settings.backlight_fade_time = 1;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace VideoCore
|
||||
|
||||
@@ -28,6 +28,10 @@ struct RendererSettings {
|
||||
void* screenshot_bits;
|
||||
std::function<void()> screenshot_complete_callback;
|
||||
Layout::FramebufferLayout screenshot_framebuffer_layout;
|
||||
|
||||
// Backlight & Brightness
|
||||
std::atomic<f32> current_brightness{1.f};
|
||||
std::atomic<u64> backlight_fade_time{0};
|
||||
};
|
||||
|
||||
class RendererBase : NonCopyable {
|
||||
@@ -86,6 +90,17 @@ public:
|
||||
void RequestScreenshot(void* data, std::function<void()> callback,
|
||||
const Layout::FramebufferLayout& layout);
|
||||
|
||||
// Gets the current brightness, even if it has been changed from the set value. Most of the time
// for yuzu this will simply match what was set, but implementations are free to change the
// value in the renderer settings.
|
||||
f32 GetCurrentResultantBrightness() const;
|
||||
|
||||
void SetBacklightStatus(bool enabled, u64 fade_transition_time);
|
||||
|
||||
bool GetBacklightStatus() const;
|
||||
|
||||
void SetCurrentBrightness(f32 value);
|
||||
|
||||
protected:
|
||||
Core::Frontend::EmuWindow& render_window; ///< Reference to the render window handle.
|
||||
std::unique_ptr<RasterizerInterface> rasterizer;
|
||||
@@ -97,6 +112,9 @@ protected:
|
||||
private:
|
||||
/// Updates the framebuffer layout of the contained render window handle.
|
||||
void UpdateCurrentFramebufferLayout();
|
||||
|
||||
// Value of brightness before backlight switch used to preserve value.
|
||||
f32 current_brightness_backup;
|
||||
};
|
||||
|
||||
} // namespace VideoCore
|
||||
|
||||
@@ -348,6 +348,7 @@ static constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
|
||||
}
|
||||
|
||||
void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
|
||||
std::lock_guard lock{pages_mutex};
|
||||
const u64 page_start{addr >> Memory::PAGE_BITS};
|
||||
const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS};
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
#include <cstddef>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <tuple>
|
||||
#include <utility>
|
||||
@@ -230,6 +231,8 @@ private:
|
||||
|
||||
using CachedPageMap = boost::icl::interval_map<u64, int>;
|
||||
CachedPageMap cached_pages;
|
||||
|
||||
std::mutex pages_mutex;
|
||||
};
|
||||
|
||||
} // namespace OpenGL
|
||||
|
||||
@@ -111,7 +111,8 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format
|
||||
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
|
||||
true}, // DXT45_SRGB
|
||||
{GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
|
||||
true}, // BC7U_SRGB
|
||||
true}, // BC7U_SRGB
|
||||
{GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV, ComponentType::UNorm, false}, // R4G4B4A4U
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4_SRGB
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8_SRGB
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5_SRGB
|
||||
@@ -120,6 +121,16 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5_SRGB
|
||||
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8_SRGB
|
||||
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X6
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X6_SRGB
|
||||
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X10
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X10_SRGB
|
||||
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_12X12
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_12X12_SRGB
|
||||
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X6
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X6_SRGB
|
||||
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X5
|
||||
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X5_SRGB
|
||||
|
||||
// Depth formats
|
||||
{GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, ComponentType::Float, false}, // Z32F
|
||||
|
||||
@@ -54,11 +54,13 @@ in vec2 frag_tex_coord;
|
||||
out vec4 color;
|
||||
|
||||
uniform sampler2D color_texture;
|
||||
uniform vec4 backlight;
|
||||
|
||||
void main() {
|
||||
// Swap RGBA -> ABGR so we don't have to do this on the CPU. This needs to change if we have to
|
||||
// support more framebuffer pixel formats.
|
||||
color = texture(color_texture, frag_tex_coord);
|
||||
// Also multiply the color by the backlight multiplier supplied.
|
||||
color = texture(color_texture, frag_tex_coord) * backlight;
|
||||
}
|
||||
)";
|
||||
|
||||
@@ -102,8 +104,6 @@ RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::Syst
|
||||
RendererOpenGL::~RendererOpenGL() = default;
|
||||
|
||||
void RendererOpenGL::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
|
||||
system.GetPerfStats().EndSystemFrame();
|
||||
|
||||
// Maintain the rasterizer's state as a priority
|
||||
OpenGLState prev_state = OpenGLState::GetCurState();
|
||||
state.AllDirty();
|
||||
@@ -123,8 +123,13 @@ void RendererOpenGL::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
|
||||
// Load the framebuffer from memory, draw it to the screen, and swap buffers
|
||||
LoadFBToScreenInfo(*framebuffer);
|
||||
|
||||
if (renderer_settings.screenshot_requested)
|
||||
if (renderer_settings.screenshot_requested) {
|
||||
CaptureScreenshot();
|
||||
}
|
||||
|
||||
if (renderer_settings.backlight_fade_time > 0) {
|
||||
UpdateBacklight();
|
||||
}
|
||||
|
||||
DrawScreen(render_window.GetFramebufferLayout());
|
||||
|
||||
@@ -135,9 +140,6 @@ void RendererOpenGL::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
|
||||
|
||||
render_window.PollEvents();
|
||||
|
||||
system.FrameLimiter().DoFrameLimiting(system.CoreTiming().GetGlobalTimeUs());
|
||||
system.GetPerfStats().BeginSystemFrame();
|
||||
|
||||
// Restore the rasterizer state
|
||||
prev_state.AllDirty();
|
||||
prev_state.Apply();
|
||||
@@ -210,9 +212,13 @@ void RendererOpenGL::InitOpenGLObjects() {
|
||||
state.Apply();
|
||||
uniform_modelview_matrix = glGetUniformLocation(shader.handle, "modelview_matrix");
|
||||
uniform_color_texture = glGetUniformLocation(shader.handle, "color_texture");
|
||||
uniform_backlight = glGetUniformLocation(shader.handle, "backlight");
|
||||
attrib_position = glGetAttribLocation(shader.handle, "vert_position");
|
||||
attrib_tex_coord = glGetAttribLocation(shader.handle, "vert_tex_coord");
|
||||
|
||||
// Initialize backlight
|
||||
glUniform4f(uniform_backlight, 1.f, 1.f, 1.f, 1.f);
|
||||
|
||||
// Generate VBO handle for drawing
|
||||
vertex_buffer.Create();
|
||||
|
||||
@@ -421,6 +427,29 @@ void RendererOpenGL::CaptureScreenshot() {
|
||||
renderer_settings.screenshot_requested = false;
|
||||
}
|
||||
|
||||
void RendererOpenGL::UpdateBacklight() {
|
||||
constexpr u64 PER_FRAME_FADE_TIME = 1000000000.0f / 60;
|
||||
|
||||
const auto fade_time = renderer_settings.backlight_fade_time.load(std::memory_order_relaxed);
|
||||
auto value = renderer_settings.current_brightness.load(std::memory_order_relaxed);
|
||||
if (fade_time <= PER_FRAME_FADE_TIME) {
|
||||
glUniform4f(uniform_backlight, value, value, value, value);
|
||||
renderer_settings.backlight_fade_time = 0;
|
||||
fade_time_max = 0;
|
||||
} else {
|
||||
if (fade_time_max == 0) {
|
||||
fade_time_max = fade_time;
|
||||
value_max = value;
|
||||
}
|
||||
|
||||
value += (value_max - value) * PER_FRAME_FADE_TIME / fade_time_max;
|
||||
|
||||
glUniform4f(uniform_backlight, value, value, value, value);
|
||||
renderer_settings.backlight_fade_time -= PER_FRAME_FADE_TIME;
|
||||
renderer_settings.current_brightness = value;
|
||||
}
|
||||
}
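The fade above nudges the brightness toward the captured target once per frame: value += (value_max - value) * PER_FRAME_FADE_TIME / fade_time_max. A small standalone illustration of how that step behaves over a few frames, assuming a hypothetical one-second fade at 60 FPS (numbers not taken from the change):

    #include <cstdio>

    int main() {
        const double per_frame = 1000000000.0 / 60.0; // one frame in nanoseconds
        const double fade_time_max = 1000000000.0;    // total fade time: one second
        const double target = 1.0;                    // brightness captured at fade start

        double value = 0.0;
        for (int frame = 0; frame < 5; ++frame) {
            value += (target - value) * per_frame / fade_time_max;
            std::printf("frame %d: value = %.4f\n", frame, value);
        }
        // Each frame closes 1/60 of the remaining gap, so the value approaches
        // the target smoothly rather than in a strictly linear ramp.
        return 0;
    }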
static const char* GetSource(GLenum source) {
|
||||
#define RET(s) \
|
||||
case GL_DEBUG_SOURCE_##s: \
|
||||
|
||||
@@ -70,6 +70,7 @@ private:
|
||||
void UpdateFramerate();
|
||||
|
||||
void CaptureScreenshot();
|
||||
void UpdateBacklight();
|
||||
|
||||
// Loads framebuffer from emulated memory into the display information structure
|
||||
void LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuffer);
|
||||
@@ -97,6 +98,7 @@ private:
|
||||
// Shader uniform location indices
|
||||
GLuint uniform_modelview_matrix;
|
||||
GLuint uniform_color_texture;
|
||||
GLuint uniform_backlight;
|
||||
|
||||
// Shader attribute input indices
|
||||
GLuint attrib_position;
|
||||
@@ -105,6 +107,10 @@ private:
|
||||
/// Used for transforming the framebuffer orientation
|
||||
Tegra::FramebufferConfig::TransformFlags framebuffer_transform_flags;
|
||||
Common::Rectangle<int> framebuffer_crop_rect;
|
||||
|
||||
// Used for backlight transitions
|
||||
u64 fade_time_max = 0;
|
||||
f32 value_max = 0;
|
||||
};
|
||||
|
||||
} // namespace OpenGL
|
||||
|
||||
@@ -212,6 +212,14 @@ PixelFormat PixelFormatFromTextureFormat(Tegra::Texture::TextureFormat format,
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case Tegra::Texture::TextureFormat::A4B4G4R4:
|
||||
switch (component_type) {
|
||||
case Tegra::Texture::ComponentType::UNORM:
|
||||
return PixelFormat::R4G4B4A4U;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case Tegra::Texture::TextureFormat::R8:
|
||||
switch (component_type) {
|
||||
case Tegra::Texture::ComponentType::UNORM:
|
||||
@@ -350,6 +358,16 @@ PixelFormat PixelFormatFromTextureFormat(Tegra::Texture::TextureFormat format,
|
||||
return is_srgb ? PixelFormat::ASTC_2D_8X5_SRGB : PixelFormat::ASTC_2D_8X5;
|
||||
case Tegra::Texture::TextureFormat::ASTC_2D_10X8:
|
||||
return is_srgb ? PixelFormat::ASTC_2D_10X8_SRGB : PixelFormat::ASTC_2D_10X8;
|
||||
case Tegra::Texture::TextureFormat::ASTC_2D_6X6:
|
||||
return is_srgb ? PixelFormat::ASTC_2D_6X6_SRGB : PixelFormat::ASTC_2D_6X6;
|
||||
case Tegra::Texture::TextureFormat::ASTC_2D_10X10:
|
||||
return is_srgb ? PixelFormat::ASTC_2D_10X10_SRGB : PixelFormat::ASTC_2D_10X10;
|
||||
case Tegra::Texture::TextureFormat::ASTC_2D_12X12:
|
||||
return is_srgb ? PixelFormat::ASTC_2D_12X12_SRGB : PixelFormat::ASTC_2D_12X12;
|
||||
case Tegra::Texture::TextureFormat::ASTC_2D_8X6:
|
||||
return is_srgb ? PixelFormat::ASTC_2D_8X6_SRGB : PixelFormat::ASTC_2D_8X6;
|
||||
case Tegra::Texture::TextureFormat::ASTC_2D_6X5:
|
||||
return is_srgb ? PixelFormat::ASTC_2D_6X5_SRGB : PixelFormat::ASTC_2D_6X5;
|
||||
case Tegra::Texture::TextureFormat::R16_G16:
|
||||
switch (component_type) {
|
||||
case Tegra::Texture::ComponentType::FLOAT:
|
||||
@@ -510,6 +528,16 @@ bool IsPixelFormatASTC(PixelFormat format) {
|
||||
case PixelFormat::ASTC_2D_8X5_SRGB:
|
||||
case PixelFormat::ASTC_2D_10X8:
|
||||
case PixelFormat::ASTC_2D_10X8_SRGB:
|
||||
case PixelFormat::ASTC_2D_6X6:
|
||||
case PixelFormat::ASTC_2D_6X6_SRGB:
|
||||
case PixelFormat::ASTC_2D_10X10:
|
||||
case PixelFormat::ASTC_2D_10X10_SRGB:
|
||||
case PixelFormat::ASTC_2D_12X12:
|
||||
case PixelFormat::ASTC_2D_12X12_SRGB:
|
||||
case PixelFormat::ASTC_2D_8X6:
|
||||
case PixelFormat::ASTC_2D_8X6_SRGB:
|
||||
case PixelFormat::ASTC_2D_6X5:
|
||||
case PixelFormat::ASTC_2D_6X5_SRGB:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@@ -530,6 +558,11 @@ bool IsPixelFormatSRGB(PixelFormat format) {
|
||||
case PixelFormat::ASTC_2D_5X4_SRGB:
|
||||
case PixelFormat::ASTC_2D_5X5_SRGB:
|
||||
case PixelFormat::ASTC_2D_10X8_SRGB:
|
||||
case PixelFormat::ASTC_2D_6X6_SRGB:
|
||||
case PixelFormat::ASTC_2D_10X10_SRGB:
|
||||
case PixelFormat::ASTC_2D_12X12_SRGB:
|
||||
case PixelFormat::ASTC_2D_8X6_SRGB:
|
||||
case PixelFormat::ASTC_2D_6X5_SRGB:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
|
||||
@@ -67,27 +67,38 @@ enum class PixelFormat {
|
||||
DXT23_SRGB = 49,
|
||||
DXT45_SRGB = 50,
|
||||
BC7U_SRGB = 51,
|
||||
ASTC_2D_4X4_SRGB = 52,
|
||||
ASTC_2D_8X8_SRGB = 53,
|
||||
ASTC_2D_8X5_SRGB = 54,
|
||||
ASTC_2D_5X4_SRGB = 55,
|
||||
ASTC_2D_5X5 = 56,
|
||||
ASTC_2D_5X5_SRGB = 57,
|
||||
ASTC_2D_10X8 = 58,
|
||||
ASTC_2D_10X8_SRGB = 59,
|
||||
R4G4B4A4U = 52,
|
||||
ASTC_2D_4X4_SRGB = 53,
|
||||
ASTC_2D_8X8_SRGB = 54,
|
||||
ASTC_2D_8X5_SRGB = 55,
|
||||
ASTC_2D_5X4_SRGB = 56,
|
||||
ASTC_2D_5X5 = 57,
|
||||
ASTC_2D_5X5_SRGB = 58,
|
||||
ASTC_2D_10X8 = 59,
|
||||
ASTC_2D_10X8_SRGB = 60,
|
||||
ASTC_2D_6X6 = 61,
|
||||
ASTC_2D_6X6_SRGB = 62,
|
||||
ASTC_2D_10X10 = 63,
|
||||
ASTC_2D_10X10_SRGB = 64,
|
||||
ASTC_2D_12X12 = 65,
|
||||
ASTC_2D_12X12_SRGB = 66,
|
||||
ASTC_2D_8X6 = 67,
|
||||
ASTC_2D_8X6_SRGB = 68,
|
||||
ASTC_2D_6X5 = 69,
|
||||
ASTC_2D_6X5_SRGB = 70,
|
||||
|
||||
MaxColorFormat,
|
||||
|
||||
// Depth formats
|
||||
Z32F = 60,
|
||||
Z16 = 61,
|
||||
Z32F = 71,
|
||||
Z16 = 72,
|
||||
|
||||
MaxDepthFormat,
|
||||
|
||||
// DepthStencil formats
|
||||
Z24S8 = 62,
|
||||
S8Z24 = 63,
|
||||
Z32FS8 = 64,
|
||||
Z24S8 = 73,
|
||||
S8Z24 = 74,
|
||||
Z32FS8 = 75,
|
||||
|
||||
MaxDepthStencilFormat,
|
||||
|
||||
@@ -177,6 +188,7 @@ constexpr std::array<u32, MaxPixelFormat> compression_factor_shift_table = {{
|
||||
2, // DXT23_SRGB
|
||||
2, // DXT45_SRGB
|
||||
2, // BC7U_SRGB
|
||||
0, // R4G4B4A4U
|
||||
2, // ASTC_2D_4X4_SRGB
|
||||
2, // ASTC_2D_8X8_SRGB
|
||||
2, // ASTC_2D_8X5_SRGB
|
||||
@@ -185,6 +197,16 @@ constexpr std::array<u32, MaxPixelFormat> compression_factor_shift_table = {{
|
||||
2, // ASTC_2D_5X5_SRGB
|
||||
2, // ASTC_2D_10X8
|
||||
2, // ASTC_2D_10X8_SRGB
|
||||
2, // ASTC_2D_6X6
|
||||
2, // ASTC_2D_6X6_SRGB
|
||||
2, // ASTC_2D_10X10
|
||||
2, // ASTC_2D_10X10_SRGB
|
||||
2, // ASTC_2D_12X12
|
||||
2, // ASTC_2D_12X12_SRGB
|
||||
2, // ASTC_2D_8X6
|
||||
2, // ASTC_2D_8X6_SRGB
|
||||
2, // ASTC_2D_6X5
|
||||
2, // ASTC_2D_6X5_SRGB
|
||||
0, // Z32F
|
||||
0, // Z16
|
||||
0, // Z24S8
|
||||
@@ -261,6 +283,7 @@ constexpr std::array<u32, MaxPixelFormat> block_width_table = {{
|
||||
4, // DXT23_SRGB
|
||||
4, // DXT45_SRGB
|
||||
4, // BC7U_SRGB
|
||||
1, // R4G4B4A4U
|
||||
4, // ASTC_2D_4X4_SRGB
|
||||
8, // ASTC_2D_8X8_SRGB
|
||||
8, // ASTC_2D_8X5_SRGB
|
||||
@@ -269,6 +292,16 @@ constexpr std::array<u32, MaxPixelFormat> block_width_table = {{
|
||||
5, // ASTC_2D_5X5_SRGB
|
||||
10, // ASTC_2D_10X8
|
||||
10, // ASTC_2D_10X8_SRGB
|
||||
6, // ASTC_2D_6X6
|
||||
6, // ASTC_2D_6X6_SRGB
|
||||
10, // ASTC_2D_10X10
|
||||
10, // ASTC_2D_10X10_SRGB
|
||||
12, // ASTC_2D_12X12
|
||||
12, // ASTC_2D_12X12_SRGB
|
||||
8, // ASTC_2D_8X6
|
||||
8, // ASTC_2D_8X6_SRGB
|
||||
6, // ASTC_2D_6X5
|
||||
6, // ASTC_2D_6X5_SRGB
|
||||
1, // Z32F
|
||||
1, // Z16
|
||||
1, // Z24S8
|
||||
@@ -285,71 +318,82 @@ static constexpr u32 GetDefaultBlockWidth(PixelFormat format) {
|
||||
}
|
||||
|
||||
constexpr std::array<u32, MaxPixelFormat> block_height_table = {{
|
||||
1, // ABGR8U
|
||||
1, // ABGR8S
|
||||
1, // ABGR8UI
|
||||
1, // B5G6R5U
|
||||
1, // A2B10G10R10U
|
||||
1, // A1B5G5R5U
|
||||
1, // R8U
|
||||
1, // R8UI
|
||||
1, // RGBA16F
|
||||
1, // RGBA16U
|
||||
1, // RGBA16UI
|
||||
1, // R11FG11FB10F
|
||||
1, // RGBA32UI
|
||||
4, // DXT1
|
||||
4, // DXT23
|
||||
4, // DXT45
|
||||
4, // DXN1
|
||||
4, // DXN2UNORM
|
||||
4, // DXN2SNORM
|
||||
4, // BC7U
|
||||
4, // BC6H_UF16
|
||||
4, // BC6H_SF16
|
||||
4, // ASTC_2D_4X4
|
||||
1, // BGRA8
|
||||
1, // RGBA32F
|
||||
1, // RG32F
|
||||
1, // R32F
|
||||
1, // R16F
|
||||
1, // R16U
|
||||
1, // R16S
|
||||
1, // R16UI
|
||||
1, // R16I
|
||||
1, // RG16
|
||||
1, // RG16F
|
||||
1, // RG16UI
|
||||
1, // RG16I
|
||||
1, // RG16S
|
||||
1, // RGB32F
|
||||
1, // RGBA8_SRGB
|
||||
1, // RG8U
|
||||
1, // RG8S
|
||||
1, // RG32UI
|
||||
1, // RGBX16F
|
||||
1, // R32UI
|
||||
8, // ASTC_2D_8X8
|
||||
5, // ASTC_2D_8X5
|
||||
4, // ASTC_2D_5X4
|
||||
1, // BGRA8_SRGB
|
||||
4, // DXT1_SRGB
|
||||
4, // DXT23_SRGB
|
||||
4, // DXT45_SRGB
|
||||
4, // BC7U_SRGB
|
||||
4, // ASTC_2D_4X4_SRGB
|
||||
8, // ASTC_2D_8X8_SRGB
|
||||
5, // ASTC_2D_8X5_SRGB
|
||||
4, // ASTC_2D_5X4_SRGB
|
||||
5, // ASTC_2D_5X5
|
||||
5, // ASTC_2D_5X5_SRGB
|
||||
8, // ASTC_2D_10X8
|
||||
8, // ASTC_2D_10X8_SRGB
|
||||
1, // Z32F
|
||||
1, // Z16
|
||||
1, // Z24S8
|
||||
1, // S8Z24
|
||||
1, // Z32FS8
|
||||
1, // ABGR8U
|
||||
1, // ABGR8S
|
||||
1, // ABGR8UI
|
||||
1, // B5G6R5U
|
||||
1, // A2B10G10R10U
|
||||
1, // A1B5G5R5U
|
||||
1, // R8U
|
||||
1, // R8UI
|
||||
1, // RGBA16F
|
||||
1, // RGBA16U
|
||||
1, // RGBA16UI
|
||||
1, // R11FG11FB10F
|
||||
1, // RGBA32UI
|
||||
4, // DXT1
|
||||
4, // DXT23
|
||||
4, // DXT45
|
||||
4, // DXN1
|
||||
4, // DXN2UNORM
|
||||
4, // DXN2SNORM
|
||||
4, // BC7U
|
||||
4, // BC6H_UF16
|
||||
4, // BC6H_SF16
|
||||
4, // ASTC_2D_4X4
|
||||
1, // BGRA8
|
||||
1, // RGBA32F
|
||||
1, // RG32F
|
||||
1, // R32F
|
||||
1, // R16F
|
||||
1, // R16U
|
||||
1, // R16S
|
||||
1, // R16UI
|
||||
1, // R16I
|
||||
1, // RG16
|
||||
1, // RG16F
|
||||
1, // RG16UI
|
||||
1, // RG16I
|
||||
1, // RG16S
|
||||
1, // RGB32F
|
||||
1, // RGBA8_SRGB
|
||||
1, // RG8U
|
||||
1, // RG8S
|
||||
1, // RG32UI
|
||||
1, // RGBX16F
|
||||
1, // R32UI
|
||||
8, // ASTC_2D_8X8
|
||||
5, // ASTC_2D_8X5
|
||||
4, // ASTC_2D_5X4
|
||||
1, // BGRA8_SRGB
|
||||
4, // DXT1_SRGB
|
||||
4, // DXT23_SRGB
|
||||
4, // DXT45_SRGB
|
||||
4, // BC7U_SRGB
|
||||
1, // R4G4B4A4U
|
||||
4, // ASTC_2D_4X4_SRGB
|
||||
8, // ASTC_2D_8X8_SRGB
|
||||
5, // ASTC_2D_8X5_SRGB
|
||||
4, // ASTC_2D_5X4_SRGB
|
||||
5, // ASTC_2D_5X5
|
||||
5, // ASTC_2D_5X5_SRGB
|
||||
8, // ASTC_2D_10X8
|
||||
8, // ASTC_2D_10X8_SRGB
|
||||
6, // ASTC_2D_6X6
|
||||
6, // ASTC_2D_6X6_SRGB
|
||||
10, // ASTC_2D_10X10
|
||||
10, // ASTC_2D_10X10_SRGB
|
||||
12, // ASTC_2D_12X12
|
||||
12, // ASTC_2D_12X12_SRGB
|
||||
6, // ASTC_2D_8X6
|
||||
6, // ASTC_2D_8X6_SRGB
|
||||
5, // ASTC_2D_6X5
|
||||
5, // ASTC_2D_6X5_SRGB
|
||||
1, // Z32F
|
||||
1, // Z16
|
||||
1, // Z24S8
|
||||
1, // S8Z24
|
||||
1, // Z32FS8
|
||||
}};
|
||||
|
||||
static constexpr u32 GetDefaultBlockHeight(PixelFormat format) {
|
||||
@@ -413,6 +457,7 @@ constexpr std::array<u32, MaxPixelFormat> bpp_table = {{
|
||||
128, // DXT23_SRGB
|
||||
128, // DXT45_SRGB
|
||||
128, // BC7U
|
||||
16, // R4G4B4A4U
|
||||
128, // ASTC_2D_4X4_SRGB
|
||||
128, // ASTC_2D_8X8_SRGB
|
||||
128, // ASTC_2D_8X5_SRGB
|
||||
@@ -421,6 +466,16 @@ constexpr std::array<u32, MaxPixelFormat> bpp_table = {{
|
||||
128, // ASTC_2D_5X5_SRGB
|
||||
128, // ASTC_2D_10X8
|
||||
128, // ASTC_2D_10X8_SRGB
|
||||
128, // ASTC_2D_6X6
|
||||
128, // ASTC_2D_6X6_SRGB
|
||||
128, // ASTC_2D_10X10
|
||||
128, // ASTC_2D_10X10_SRGB
|
||||
128, // ASTC_2D_12X12
|
||||
128, // ASTC_2D_12X12_SRGB
|
||||
128, // ASTC_2D_8X6
|
||||
128, // ASTC_2D_8X6_SRGB
|
||||
128, // ASTC_2D_6X5
|
||||
128, // ASTC_2D_6X5_SRGB
|
||||
32, // Z32F
|
||||
16, // Z16
|
||||
32, // Z24S8
|
||||
@@ -504,6 +559,7 @@ constexpr std::array<SurfaceCompression, MaxPixelFormat> compression_type_table
    SurfaceCompression::Compressed, // DXT23_SRGB
    SurfaceCompression::Compressed, // DXT45_SRGB
    SurfaceCompression::Compressed, // BC7U_SRGB
    SurfaceCompression::None, // R4G4B4A4U
    SurfaceCompression::Converted, // ASTC_2D_4X4_SRGB
    SurfaceCompression::Converted, // ASTC_2D_8X8_SRGB
    SurfaceCompression::Converted, // ASTC_2D_8X5_SRGB
@@ -512,6 +568,16 @@ constexpr std::array<SurfaceCompression, MaxPixelFormat> compression_type_table
    SurfaceCompression::Converted, // ASTC_2D_5X5_SRGB
    SurfaceCompression::Converted, // ASTC_2D_10X8
    SurfaceCompression::Converted, // ASTC_2D_10X8_SRGB
    SurfaceCompression::Converted, // ASTC_2D_6X6
    SurfaceCompression::Converted, // ASTC_2D_6X6_SRGB
    SurfaceCompression::Converted, // ASTC_2D_10X10
    SurfaceCompression::Converted, // ASTC_2D_10X10_SRGB
    SurfaceCompression::Converted, // ASTC_2D_12X12
    SurfaceCompression::Converted, // ASTC_2D_12X12_SRGB
    SurfaceCompression::Converted, // ASTC_2D_8X6
    SurfaceCompression::Converted, // ASTC_2D_8X6_SRGB
    SurfaceCompression::Converted, // ASTC_2D_6X5
    SurfaceCompression::Converted, // ASTC_2D_6X5_SRGB
    SurfaceCompression::None, // Z32F
    SurfaceCompression::None, // Z16
    SurfaceCompression::None, // Z24S8
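Read together, the table's three values distinguish formats stored as-is (None), formats kept in their native block-compressed form (Compressed), and formats expected to be translated to another representation before use (Converted, which covers every ASTC entry here). A hypothetical lookup sketch under that reading; the enum, table, and helper below are illustrative stand-ins rather than the project's definitions:

#include <array>
#include <cstddef>

// Stand-in enum and a three-entry sample table mirroring rows from the diff.
enum class SurfaceCompression { None, Compressed, Converted };

constexpr std::array<SurfaceCompression, 3> sample_table{
    SurfaceCompression::Compressed, // e.g. BC7U_SRGB
    SurfaceCompression::None,       // e.g. R4G4B4A4U
    SurfaceCompression::Converted,  // e.g. ASTC_2D_6X6
};

constexpr bool NeedsConversion(std::size_t format_index) {
    // Only entries tagged Converted require a translation step before use.
    return sample_table[format_index] == SurfaceCompression::Converted;
}

static_assert(NeedsConversion(2) && !NeedsConversion(0));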
@@ -62,6 +62,8 @@ ConfigureGraphics::ConfigureGraphics(QWidget* parent)
        }
        UpdateBackgroundColorButton(new_bg_color);
    });
    connect(ui->brightness_reset, &QPushButton::pressed, this,
            [this] { ui->brightness_slider->setValue(100); });
}

ConfigureGraphics::~ConfigureGraphics() = default;
@@ -80,6 +82,7 @@ void ConfigureGraphics::SetConfiguration() {
    ui->force_30fps_mode->setChecked(Settings::values.force_30fps_mode);
    UpdateBackgroundColorButton(QColor::fromRgbF(Settings::values.bg_red, Settings::values.bg_green,
                                                 Settings::values.bg_blue));
    ui->brightness_slider->setValue(Settings::values.backlight_brightness * 100 + 50);
}

void ConfigureGraphics::ApplyConfiguration() {
@@ -93,6 +96,7 @@ void ConfigureGraphics::ApplyConfiguration() {
    Settings::values.bg_red = static_cast<float>(bg_color.redF());
    Settings::values.bg_green = static_cast<float>(bg_color.greenF());
    Settings::values.bg_blue = static_cast<float>(bg_color.blueF());
    Settings::values.backlight_brightness = (ui->brightness_slider->value() - 50.0f) / 100.0f;
}

void ConfigureGraphics::changeEvent(QEvent* event) {
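The two backlight_brightness lines above define a linear mapping between the stored setting and the slider, whose range (per the .ui change below) is 50-150 with a default of 100. A standalone round-trip check of that arithmetic, not project code:

#include <cassert>

int main() {
    // A stored brightness of 0.5 maps to the slider's default of 100 and back.
    const float brightness = 0.5f;
    const int slider = static_cast<int>(brightness * 100 + 50); // SetConfiguration direction
    const float round_trip = (slider - 50.0f) / 100.0f;         // ApplyConfiguration direction
    assert(slider == 100);
    assert(round_trip == brightness);
    return 0;
}

By the same mapping, the Reset button's setValue(100) restores a stored brightness of 0.5, the midpoint of the 0.0-1.0 range.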
@@ -111,6 +111,68 @@
       </item>
      </layout>
     </item>
     <item>
      <layout class="QHBoxLayout" name="horizontalLayout_3">
       <item>
        <widget class="QLabel" name="label_2">
         <property name="text">
          <string>Brightness</string>
         </property>
        </widget>
       </item>
       <item>
        <spacer name="horizontalSpacer">
         <property name="orientation">
          <enum>Qt::Horizontal</enum>
         </property>
         <property name="sizeHint" stdset="0">
          <size>
           <width>40</width>
           <height>20</height>
          </size>
         </property>
        </spacer>
       </item>
       <item>
        <widget class="QSlider" name="brightness_slider">
         <property name="minimum">
          <number>50</number>
         </property>
         <property name="maximum">
          <number>150</number>
         </property>
         <property name="singleStep">
          <number>10</number>
         </property>
         <property name="pageStep">
          <number>20</number>
         </property>
         <property name="value">
          <number>100</number>
         </property>
         <property name="orientation">
          <enum>Qt::Horizontal</enum>
         </property>
         <property name="tickPosition">
          <enum>QSlider::NoTicks</enum>
         </property>
        </widget>
       </item>
       <item>
        <widget class="QPushButton" name="brightness_reset">
         <property name="sizePolicy">
          <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
           <horstretch>0</horstretch>
           <verstretch>0</verstretch>
          </sizepolicy>
         </property>
         <property name="text">
          <string>Reset</string>
         </property>
        </widget>
       </item>
      </layout>
     </item>
    </layout>
   </widget>
  </item>
@@ -66,10 +66,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
    };

    const auto& system = Core::System::GetInstance();
    add_threads(system.Scheduler(0).GetThreadList());
    add_threads(system.Scheduler(1).GetThreadList());
    add_threads(system.Scheduler(2).GetThreadList());
    add_threads(system.Scheduler(3).GetThreadList());
    add_threads(system.GlobalScheduler().GetThreadList());

    return item_list;
}
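The closing brace-semicolon at the top of this hunk evidently ends an add_threads helper that appends one tree item per kernel thread; the calls that follow feed it scheduler thread lists. A standalone sketch of the resulting pattern with stand-in types (not the emulator's real classes), reading a single global list instead of four per-core ones:

#include <memory>
#include <vector>

// Stand-in types only; they model the shape of the call, not the real kernel.
struct Thread {};
struct WaitTreeThread {
    explicit WaitTreeThread(const Thread&) {}
};

std::vector<std::unique_ptr<WaitTreeThread>> MakeItems(
    const std::vector<std::shared_ptr<Thread>>& global_thread_list) {
    std::vector<std::unique_ptr<WaitTreeThread>> item_list;
    for (const auto& thread : global_thread_list) {
        // One tree item per thread, regardless of which core runs it.
        item_list.push_back(std::make_unique<WaitTreeThread>(*thread));
    }
    return item_list;
}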