Compare commits

..

1 Commits

Author SHA1 Message Date
Morph
ea7a0d4652 registered_cache: Prevent nullptr dereference when accumulating files
For whatever reason, nca_file/dir can be nullptr in the list of files/dirs. I have not determined the cause of this yet, so add a nullptr check for these prior to dereferencing them.
2022-03-27 17:06:27 -04:00
62 changed files with 14 additions and 1406 deletions

3
.gitmodules vendored
View File

@@ -43,6 +43,3 @@
[submodule "externals/ffmpeg/ffmpeg"]
path = externals/ffmpeg/ffmpeg
url = https://git.ffmpeg.org/ffmpeg.git
[submodule "externals/range-v3"]
path = externals/range-v3
url = https://github.com/ericniebler/range-v3.git

View File

@@ -6,14 +6,7 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/externals/find-module
include(DownloadExternals)
include(CMakeDependentOption)
if (APPLE)
# Hack for M1. Currently fails when compiling for arm64
set(CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "")
set(CMAKE_CROSSCOMPILING TRUE)
project(yuzu)
else()
project(yuzu)
endif()
project(yuzu)
# Set bundled sdl2/qt as dependent options.
# OFF by default, but if ENABLE_SDL2 and MSVC are true then ON
@@ -560,18 +553,8 @@ find_package(Threads REQUIRED)
if (APPLE)
# Umbrella framework for everything GUI-related
find_library(APPKIT_LIBRARY AppKit)
find_library(APPSERV_LIBRARY ApplicationServices)
find_library(CARBON_LIBRARY Carbon)
find_library(COCOA_LIBRARY Cocoa)
find_library(COREFOUNDATION_LIBRARY CoreFoundation)
find_library(CORESERV_LIBRARY CoreServices)
find_library(FOUNDATION_LIBRARY Foundation)
find_library(IOK_LIBRARY IOKit)
set(PLATFORM_LIBRARIES ${COCOA_LIBRARY} ${IOKIT_LIBRARY} ${COREVIDEO_LIBRARY})
set(SDL_FILE ON)
include_directories("/usr/local/opt/zstd/include")
elseif (WIN32)
# WSAPoll and SHGetKnownFolderPath (AppData/Roaming) didn't exist before WinNT 6.x (Vista)
add_definitions(-D_WIN32_WINNT=0x0600 -DWINVER=0x0600)

View File

@@ -127,8 +127,3 @@ if (YUZU_USE_BUNDLED_FFMPEG)
set(FFmpeg_LIBRARIES "${FFmpeg_LIBRARIES}" PARENT_SCOPE)
set(FFmpeg_INCLUDE_DIR "${FFmpeg_INCLUDE_DIR}" PARENT_SCOPE)
endif()
if (APPLE)
add_library(range_v3 INTERFACE)
set_target_properties(range_v3 PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${PROJECT_SOURCE_DIR}/externals/range-v3/include)
endif()

View File

@@ -137,7 +137,6 @@ if (NOT WIN32)
--disable-network
--disable-postproc
--disable-swresample
--disable-iconv
--enable-decoder=h264
--enable-decoder=vp8
--enable-decoder=vp9

1
externals/range-v3 vendored

Submodule externals/range-v3 deleted from d800a03213

View File

@@ -186,7 +186,3 @@ if (MSVC)
else()
target_link_libraries(common PRIVATE zstd)
endif()
if (APPLE)
target_link_libraries(common PUBLIC range_v3)
endif()

View File

@@ -1,108 +0,0 @@
// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
// Very hacky :)
#ifdef __APPLE__
#include <bit>
#include <concepts>
#include "common/apple_compat/condition_variable_any2.hpp"
#include "common/apple_compat/jthread.hpp"
#include <range/v3/all.hpp>
// use an external library range-v3 as std's one
namespace std::ranges {
using namespace ::ranges;
}
// adaptation of new features in c++20
namespace std {
constexpr unsigned long long min(const unsigned long long a, const unsigned long long b) {
return (b < a) ? b : a;
}
template <std::unsigned_integral T>
/* requires !std::custom_same_as<T, bool> && !std::custom_same_as<T, char> &&
!std::custom_same_as<T, char8_t> && !std::custom_same_as<T, char16_t> &&
!std::custom_same_as<T, char32_t> && !std::custom_same_as<T, wchar_t>*/
constexpr bool has_single_bit(T x) noexcept {
return x != 0 && (x & (x - 1)) == 0;
}
using condition_variable_any_apple = std::condition_variable_any2;
} // namespace std
// fix sorting with constexpr funcs
// thanks to https://tristanbrindle.com/posts/a-more-useful-compile-time-quicksort
namespace constexprSort {
namespace cstd {
template <typename RAIt>
constexpr RAIt next(RAIt it, typename std::iterator_traits<RAIt>::difference_type n = 1) {
return it + n;
}
template <typename RAIt>
constexpr auto distance(RAIt first, RAIt last) {
return last - first;
}
template <class ForwardIt1, class ForwardIt2>
constexpr void iter_swap(ForwardIt1 a, ForwardIt2 b) {
auto temp = std::move(*a);
*a = std::move(*b);
*b = std::move(temp);
}
template <class InputIt, class UnaryPredicate>
constexpr InputIt find_if_not(InputIt first, InputIt last, UnaryPredicate q) {
for (; first != last; ++first) {
if (!q(*first)) {
return first;
}
}
return last;
}
template <class ForwardIt, class UnaryPredicate>
constexpr ForwardIt partition(ForwardIt first, ForwardIt last, UnaryPredicate p) {
first = cstd::find_if_not(first, last, p);
if (first == last)
return first;
for (ForwardIt i = cstd::next(first); i != last; ++i) {
if (p(*i)) {
cstd::iter_swap(i, first);
++first;
}
}
return first;
}
} // namespace cstd
template <class RAIt, class Compare = std::less<>>
constexpr void quick_sort(RAIt first, RAIt last, Compare cmp = Compare{}) {
auto const N = cstd::distance(first, last);
if (N <= 1)
return;
auto const pivot = *cstd::next(first, N / 2);
auto const middle1 =
cstd::partition(first, last, [=](auto const& elem) { return cmp(elem, pivot); });
auto const middle2 =
cstd::partition(middle1, last, [=](auto const& elem) { return !cmp(pivot, elem); });
quick_sort(first, middle1, cmp); // assert(std::is_sorted(first, middle1, cmp));
quick_sort(middle2, last, cmp); // assert(std::is_sorted(middle2, last, cmp));
}
template <typename Range>
constexpr auto sort(Range&& range) {
quick_sort(std::begin(range), std::end(range));
return range;
}
} // namespace constexprSort
#endif

View File

@@ -1,299 +0,0 @@
// extended standard condition_variable to deal with
// interrupt tokens and jthread
// -----------------------------------------------------
#ifndef CONDITION_VARIABLE2_HPP
#define CONDITION_VARIABLE2_HPP
//*****************************************************************************
// forward declarations are in separate header due to cyclic type dependencies:
//*****************************************************************************
#include "common/apple_compat/stop_token.hpp"
#include <condition_variable>
#include <iostream>
namespace std {
//*****************************************
//* class condition_variable_any2
//* - condition variable with interrupt (stop-token) support
//*****************************************
class condition_variable_any2
{
template<typename Lockable>
struct unlock_guard{
unlock_guard(Lockable& mtx_):
mtx(mtx_){
mtx.unlock();
}
~unlock_guard(){
mtx.lock();
}
unlock_guard(unlock_guard const&)=delete;
unlock_guard(unlock_guard&&)=delete;
unlock_guard& operator=(unlock_guard const&)=delete;
unlock_guard& operator=(unlock_guard&&)=delete;
private:
Lockable& mtx;
};
struct cv_internals{
std::mutex m = {};
std::condition_variable cv = {};
void notify_all(){
std::lock_guard<std::mutex> guard(m);
cv.notify_all();
}
void notify_one(){
std::lock_guard<std::mutex> guard(m);
cv.notify_one();
}
};
public:
//*****************************************
//* standardized API for condition_variable_any:
//*****************************************
condition_variable_any2()
: internals{std::make_shared<cv_internals>()} {
}
~condition_variable_any2() {
}
condition_variable_any2(const condition_variable_any2&) = delete;
condition_variable_any2& operator=(const condition_variable_any2&) = delete;
void notify_one() noexcept {
internals->notify_one();
}
void notify_all() noexcept {
internals->notify_all();
}
// wait()
template<typename Lockable>
void wait(Lockable& lock) {
auto local_internals=internals;
std::unique_lock<std::mutex> first_internal_lock(local_internals->m);
unlock_guard<Lockable> unlocker(lock);
std::unique_lock<std::mutex> second_internal_lock(std::move(first_internal_lock));
local_internals->cv.wait(second_internal_lock);
}
template<class Lockable,class Predicate>
void wait(Lockable& lock, Predicate pred) {
// have to manually implement the loop so that the user-provided lock is reacquired before calling pred().
// (otherwise the test_cvrace_pred test case fails)
auto local_internals=internals;
while (!pred()) {
std::unique_lock<std::mutex> first_internal_lock(local_internals->m);
unlock_guard<Lockable> unlocker(lock);
std::unique_lock<std::mutex> second_internal_lock(std::move(first_internal_lock));
local_internals->cv.wait(second_internal_lock);
}
}
// wait_until()
template<class Lockable, class Clock, class Duration>
cv_status wait_until(Lockable& lock,
const chrono::time_point<Clock, Duration>& abs_time) {
auto local_internals=internals;
std::unique_lock<std::mutex> first_internal_lock(local_internals->m);
unlock_guard<Lockable> unlocker(lock);
std::unique_lock<std::mutex> second_internal_lock(std::move(first_internal_lock));
return local_internals->cv.wait_until(second_internal_lock, abs_time);
}
template<class Lockable,class Clock, class Duration, class Predicate>
bool wait_until(Lockable& lock,
const chrono::time_point<Clock, Duration>& abs_time,
Predicate pred) {
// have to manually implement the loop so that the user-provided lock is reacquired before calling pred().
// (otherwise the test_cvrace_pred test case fails)
auto local_internals=internals;
while (!pred()) {
bool shouldStop;
{
std::unique_lock<std::mutex> first_internal_lock(local_internals->m);
unlock_guard<Lockable> unlocker(lock);
std::unique_lock<std::mutex> second_internal_lock(std::move(first_internal_lock));
shouldStop = (local_internals->cv.wait_until(second_internal_lock, abs_time) == std::cv_status::timeout);
}
if (shouldStop) {
return pred();
}
}
return true;
}
// wait_for()
template<class Lockable,class Rep, class Period>
cv_status wait_for(Lockable& lock,
const chrono::duration<Rep, Period>& rel_time) {
return wait_until(lock, std::chrono::steady_clock::now() + rel_time);
}
template<class Lockable,class Rep, class Period, class Predicate>
bool wait_for(Lockable& lock,
const chrono::duration<Rep, Period>& rel_time,
Predicate pred) {
return wait_until(lock, std::chrono::steady_clock::now() + rel_time, std::move(pred));
}
//*****************************************
//* supplementary API:
//*****************************************
// x.6.2.1 dealing with interrupts:
// return:
// - true if pred() yields true
// - false otherwise (i.e. on interrupt)
template <class Lockable,class Predicate>
bool wait(Lockable& lock,
stop_token stoken,
Predicate pred);
// return:
// - true if pred() yields true
// - false otherwise (i.e. on timeout or interrupt)
template <class Lockable, class Clock, class Duration, class Predicate>
bool wait_until(Lockable& lock,
stop_token stoken,
const chrono::time_point<Clock, Duration>& abs_time,
Predicate pred);
// return:
// - true if pred() yields true
// - false otherwise (i.e. on timeout or interrupt)
template <class Lockable, class Rep, class Period, class Predicate>
bool wait_for(Lockable& lock,
stop_token stoken,
const chrono::duration<Rep, Period>& rel_time,
Predicate pred);
//*****************************************
//* implementation:
//*****************************************
private:
//*** API for the starting thread:
std::shared_ptr<cv_internals> internals;
// NOTE (as Howard Hinnant pointed out):
// std::~condition_variable_any() says:
// Requires: There shall be no thread blocked on *this. [Note: That is, all threads shall have been notified;
// they may subsequently block on the lock specified in the wait.
// This relaxes the usual rules, which would have required all wait calls to happen before destruction.
// Only the notification to unblock the wait needs to happen before destruction.
// The user should take care to ensure that no threads wait on *this once the destructor has been started,
// especially when the waiting threads are calling the wait functions in a loop or using the overloads of
// wait, wait_for, or wait_until that take a predicate. ]
// That big long note means ~condition_variable_any() can execute before a signaled thread returns from a wait.
// If this happens with condition_variable_any2, that waiting thread will attempt to lock the destructed mutex mut.
// To fix this, there must be shared ownership of the data member mut between the condition_variable_any object
// and the member functions wait (wait_for, etc.).
// (libc++'s implementation gets this right: https://github.com/llvm-mirror/libcxx/blob/master/include/condition_variable
// It holds the data member mutex with a shared_ptr<mutex> instead of mutex directly, and the wait functions create
// a local shared_ptr<mutex> copy on entry so that if *this destructs out from under the thread executing the wait function,
// the mutex stays alive until the wait function returns.)
};
//*****************************************************************************
//* implementation of class condition_variable_any2
//*****************************************************************************
// wait_until(): wait with interrupt handling
// - returns on interrupt
// return value:
// - true if pred() yields true
// - false otherwise (i.e. on interrupt)
template <class Lockable, class Predicate>
inline bool condition_variable_any2::wait(Lockable& lock,
stop_token stoken,
Predicate pred)
{
if (stoken.stop_requested()) {
return pred();
}
auto local_internals=internals;
stop_callback cb(stoken, [&local_internals] { local_internals->notify_all(); });
while (!pred()) {
std::unique_lock<std::mutex> first_internal_lock(local_internals->m);
if (stoken.stop_requested()) {
// pred() has already evaluated to 'false' since we last acquired 'lock'
return false;
}
unlock_guard<Lockable> unlocker(lock);
std::unique_lock<std::mutex> second_internal_lock(std::move(first_internal_lock));
local_internals->cv.wait(second_internal_lock);
}
return true;
}
// wait_until(): timed wait with interrupt handling
// - returns on interrupt
// return:
// - true if pred() yields true
// - false otherwise (i.e. on timeout or interrupt)
template <class Lockable, class Clock, class Duration, class Predicate>
inline bool condition_variable_any2::wait_until(Lockable& lock,
stop_token stoken,
const chrono::time_point<Clock, Duration>& abs_time,
Predicate pred)
{
if (stoken.stop_requested()) {
return pred();
}
// have to manually implement the loop so that the user-provided lock is reacquired before calling pred().
// (otherwise the test_cvrace_pred test case fails)
auto local_internals=internals;
stop_callback cb(stoken, [&local_internals] { local_internals->notify_all(); });
while (!pred()) {
bool shouldStop;
{
std::unique_lock<std::mutex> first_internal_lock(local_internals->m);
if (stoken.stop_requested()) {
// pred() has already evaluated to 'false' since we last acquired 'lock'.
return false;
}
unlock_guard<Lockable> unlocker(lock);
std::unique_lock<std::mutex> second_internal_lock(std::move(first_internal_lock));
const auto status = local_internals->cv.wait_until(second_internal_lock, abs_time);
shouldStop = (status == std::cv_status::timeout) || stoken.stop_requested();
}
if (shouldStop) {
return pred();
}
}
return true;
}
// wait_for(): timed wait with interrupt handling
// - returns on interrupt
// return:
// - true if pred() yields true
// - false otherwise (i.e. on timeout or interrupt)
template <class Lockable,class Rep, class Period, class Predicate>
inline bool condition_variable_any2::wait_for(Lockable& lock,
stop_token stoken,
const chrono::duration<Rep, Period>& rel_time,
Predicate pred)
{
auto abs_time = std::chrono::steady_clock::now() + rel_time;
return wait_until(lock,
std::move(stoken),
abs_time,
std::move(pred));
}
} // std
#endif // CONDITION_VARIABLE2_HPP

View File

@@ -1,174 +0,0 @@
// -----------------------------------------------------
// cooperative interruptable and joining thread:
// -----------------------------------------------------
#ifndef JTHREAD_HPP
#define JTHREAD_HPP
#include "common/apple_compat/stop_token.hpp"
#include <thread>
#include <future>
#include <type_traits>
#include <functional> // for invoke()
#include <iostream> // for debugging output
namespace std {
//*****************************************
//* class jthread
//* - joining std::thread with signaling stop/end support
//*****************************************
class jthread
{
public:
//*****************************************
//* standardized API:
//*****************************************
// - cover full API of std::thread
// to be able to switch from std::thread to std::jthread
// types are those from std::thread:
using id = ::std::thread::id;
using native_handle_type = ::std::thread::native_handle_type;
// construct/copy/destroy:
jthread() noexcept;
//template <typename F, typename... Args> explicit jthread(F&& f, Args&&... args);
// THE constructor that starts the thread:
// - NOTE: does SFINAE out copy constructor semantics
template <typename Callable, typename... Args,
typename = ::std::enable_if_t<!::std::is_same_v<::std::decay_t<Callable>, jthread>>>
explicit jthread(Callable&& cb, Args&&... args);
~jthread();
jthread(const jthread&) = delete;
jthread(jthread&&) noexcept = default;
jthread& operator=(const jthread&) = delete;
jthread& operator=(jthread&&) noexcept;
// members:
void swap(jthread&) noexcept;
bool joinable() const noexcept;
void join();
void detach();
id get_id() const noexcept;
native_handle_type native_handle();
// static members:
static unsigned hardware_concurrency() noexcept {
return ::std::thread::hardware_concurrency();
};
//*****************************************
// - supplementary API:
// - for the calling thread:
[[nodiscard]] stop_source get_stop_source() noexcept;
[[nodiscard]] stop_token get_stop_token() const noexcept;
bool request_stop() noexcept {
return get_stop_source().request_stop();
}
//*****************************************
//* implementation:
//*****************************************
private:
//*** API for the starting thread:
stop_source _stopSource; // stop_source for started thread
::std::thread _thread{}; // started thread (if any)
};
//**********************************************************************
//*****************************************
//* implementation of class jthread
//*****************************************
// default constructor:
inline jthread::jthread() noexcept
: _stopSource{nostopstate} {
}
// THE constructor that starts the thread:
// - NOTE: declaration does SFINAE out copy constructor semantics
template <typename Callable, typename... Args,
typename >
inline jthread::jthread(Callable&& cb, Args&&... args)
: _stopSource{}, // initialize stop_source
_thread{[] (stop_token st, auto&& cb, auto&&... args) { // called lambda in the thread
// perform tasks of the thread:
if constexpr(std::is_invocable_v<Callable, stop_token, Args...>) {
// pass the stop_token as first argument to the started thread:
::std::invoke(::std::forward<decltype(cb)>(cb),
std::move(st),
::std::forward<decltype(args)>(args)...);
}
else {
// started thread does not expect a stop token:
::std::invoke(::std::forward<decltype(cb)>(cb),
::std::forward<decltype(args)>(args)...);
}
},
_stopSource.get_token(), // not captured due to possible races if immediately set
::std::forward<Callable>(cb), // pass callable
::std::forward<Args>(args)... // pass arguments for callable
}
{
}
// move assignment operator:
inline jthread& jthread::operator=(jthread&& t) noexcept {
if (joinable()) { // if not joined/detached, signal stop and wait for end:
request_stop();
join();
}
_thread = std::move(t._thread);
_stopSource = std::move(t._stopSource);
return *this;
}
// destructor:
inline jthread::~jthread() {
if (joinable()) { // if not joined/detached, signal stop and wait for end:
request_stop();
join();
}
}
// others:
inline bool jthread::joinable() const noexcept {
return _thread.joinable();
}
inline void jthread::join() {
_thread.join();
}
inline void jthread::detach() {
_thread.detach();
}
inline typename jthread::id jthread::get_id() const noexcept {
return _thread.get_id();
}
inline typename jthread::native_handle_type jthread::native_handle() {
return _thread.native_handle();
}
inline stop_source jthread::get_stop_source() noexcept {
return _stopSource;
}
inline stop_token jthread::get_stop_token() const noexcept {
return _stopSource.get_token();
}
inline void jthread::swap(jthread& t) noexcept {
std::swap(_stopSource, t._stopSource);
std::swap(_thread, t._thread);
}
} // std
#endif // JTHREAD_HPP

View File

@@ -1,566 +0,0 @@
#pragma once
// <stop_token> header
#include <atomic>
#include <thread>
#include <type_traits>
#include <utility>
#ifdef SAFE
#include <iostream>
#endif
#if defined(__x86_64__) || defined(_M_X64)
#include <immintrin.h>
#endif
namespace std {
inline void __spin_yield() noexcept {
// TODO: Platform-specific code here
#if defined(__x86_64__) || defined(_M_X64)
_mm_pause();
#endif
}
//-----------------------------------------------
// internal types for shared stop state
//-----------------------------------------------
struct __stop_callback_base {
void(*__callback_)(__stop_callback_base*) = nullptr;
__stop_callback_base* __next_ = nullptr;
__stop_callback_base** __prev_ = nullptr;
bool* __isRemoved_ = nullptr;
std::atomic<bool> __callbackFinishedExecuting_{false};
void __execute() noexcept {
__callback_(this);
}
protected:
// it shall only be us who deletes this
// (workaround for virtual __execute() and destructor)
~__stop_callback_base() = default;
};
struct __stop_state {
public:
void __add_token_reference() noexcept {
__state_.fetch_add(__token_ref_increment, std::memory_order_relaxed);
}
void __remove_token_reference() noexcept {
auto __oldState =
__state_.fetch_sub(__token_ref_increment, std::memory_order_acq_rel);
if (__oldState < (__token_ref_increment + __source_ref_increment)) {
delete this;
}
}
void __add_source_reference() noexcept {
__state_.fetch_add(__source_ref_increment, std::memory_order_relaxed);
}
void __remove_source_reference() noexcept {
auto __oldState =
__state_.fetch_sub(__source_ref_increment, std::memory_order_acq_rel);
if (__oldState < (__token_ref_increment + __source_ref_increment)) {
delete this;
}
}
bool __request_stop() noexcept {
if (!__try_lock_and_signal_until_signalled()) {
// Stop has already been requested.
return false;
}
// Set the 'stop_requested' signal and acquired the lock.
__signallingThread_ = std::this_thread::get_id();
while (__head_ != nullptr) {
// Dequeue the head of the queue
auto* __cb = __head_;
__head_ = __cb->__next_;
const bool anyMore = __head_ != nullptr;
if (anyMore) {
__head_->__prev_ = &__head_;
}
// Mark this item as removed from the list.
__cb->__prev_ = nullptr;
// Don't hold lock while executing callback
// so we don't block other threads from deregistering callbacks.
__unlock();
// TRICKY: Need to store a flag on the stack here that the callback
// can use to signal that the destructor was executed inline
// during the call. If the destructor was executed inline then
// it's not safe to dereference __cb after __execute() returns.
// If the destructor runs on some other thread then the other
// thread will block waiting for this thread to signal that the
// callback has finished executing.
bool __isRemoved = false;
__cb->__isRemoved_ = &__isRemoved;
__cb->__execute();
if (!__isRemoved) {
__cb->__isRemoved_ = nullptr;
__cb->__callbackFinishedExecuting_.store(
true, std::memory_order_release);
}
if (!anyMore) {
// This was the last item in the queue when we dequeued it.
// No more items should be added to the queue after we have
// marked the state as interrupted, only removed from the queue.
// Avoid acquiring/releasing the lock in this case.
return true;
}
__lock();
}
__unlock();
return true;
}
bool __is_stop_requested() noexcept {
return __is_stop_requested(__state_.load(std::memory_order_acquire));
}
bool __is_stop_requestable() noexcept {
return __is_stop_requestable(__state_.load(std::memory_order_acquire));
}
bool __try_add_callback(
__stop_callback_base* __cb,
bool __incrementRefCountIfSuccessful) noexcept {
std::uint64_t __oldState;
goto __load_state;
do {
goto __check_state;
do {
__spin_yield();
__load_state:
__oldState = __state_.load(std::memory_order_acquire);
__check_state:
if (__is_stop_requested(__oldState)) {
__cb->__execute();
return false;
} else if (!__is_stop_requestable(__oldState)) {
return false;
}
} while (__is_locked(__oldState));
} while (!__state_.compare_exchange_weak(
__oldState, __oldState | __locked_flag, std::memory_order_acquire));
// Push callback onto callback list.
__cb->__next_ = __head_;
if (__cb->__next_ != nullptr) {
__cb->__next_->__prev_ = &__cb->__next_;
}
__cb->__prev_ = &__head_;
__head_ = __cb;
if (__incrementRefCountIfSuccessful) {
__unlock_and_increment_token_ref_count();
} else {
__unlock();
}
// Successfully added the callback.
return true;
}
void __remove_callback(__stop_callback_base* __cb) noexcept {
__lock();
if (__cb->__prev_ != nullptr) {
// Still registered, not yet executed
// Just remove from the list.
*__cb->__prev_ = __cb->__next_;
if (__cb->__next_ != nullptr) {
__cb->__next_->__prev_ = __cb->__prev_;
}
__unlock_and_decrement_token_ref_count();
return;
}
__unlock();
// Callback has either already executed or is executing
// concurrently on another thread.
if (__signallingThread_ == std::this_thread::get_id()) {
// Callback executed on this thread or is still currently executing
// and is deregistering itself from within the callback.
if (__cb->__isRemoved_ != nullptr) {
// Currently inside the callback, let the __request_stop() method
// know the object is about to be destructed and that it should
// not try to access the object when the callback returns.
*__cb->__isRemoved_ = true;
}
} else {
// Callback is currently executing on another thread,
// block until it finishes executing.
while (
!__cb->__callbackFinishedExecuting_.load(std::memory_order_acquire)) {
__spin_yield();
}
}
__remove_token_reference();
}
private:
static bool __is_locked(std::uint64_t __state) noexcept {
return (__state & __locked_flag) != 0;
}
static bool __is_stop_requested(std::uint64_t __state) noexcept {
return (__state & __stop_requested_flag) != 0;
}
static bool __is_stop_requestable(std::uint64_t __state) noexcept {
// Interruptible if it has already been interrupted or if there are
// still interrupt_source instances in existence.
return __is_stop_requested(__state) || (__state >= __source_ref_increment);
}
bool __try_lock_and_signal_until_signalled() noexcept {
std::uint64_t __oldState = __state_.load(std::memory_order_acquire);
do {
if (__is_stop_requested(__oldState))
return false;
while (__is_locked(__oldState)) {
__spin_yield();
__oldState = __state_.load(std::memory_order_acquire);
if (__is_stop_requested(__oldState))
return false;
}
} while (!__state_.compare_exchange_weak(
__oldState,
__oldState | __stop_requested_flag | __locked_flag,
std::memory_order_acq_rel,
std::memory_order_acquire));
return true;
}
void __lock() noexcept {
auto __oldState = __state_.load(std::memory_order_relaxed);
do {
while (__is_locked(__oldState)) {
__spin_yield();
__oldState = __state_.load(std::memory_order_relaxed);
}
} while (!__state_.compare_exchange_weak(
__oldState,
__oldState | __locked_flag,
std::memory_order_acquire,
std::memory_order_relaxed));
}
void __unlock() noexcept {
__state_.fetch_sub(__locked_flag, std::memory_order_release);
}
void __unlock_and_increment_token_ref_count() noexcept {
__state_.fetch_sub(
__locked_flag - __token_ref_increment, std::memory_order_release);
}
void __unlock_and_decrement_token_ref_count() noexcept {
auto __oldState = __state_.fetch_sub(
__locked_flag + __token_ref_increment, std::memory_order_acq_rel);
// Check if new state is less than __token_ref_increment which would
// indicate that this was the last reference.
if (__oldState <
(__locked_flag + __token_ref_increment + __token_ref_increment)) {
delete this;
}
}
static constexpr std::uint64_t __stop_requested_flag = 1u;
static constexpr std::uint64_t __locked_flag = 2u;
static constexpr std::uint64_t __token_ref_increment = 4u;
static constexpr std::uint64_t __source_ref_increment =
static_cast<std::uint64_t>(1u) << 33u;
// bit 0 - stop-requested
// bit 1 - locked
// bits 2-32 - token ref count (31 bits)
// bits 33-63 - source ref count (31 bits)
std::atomic<std::uint64_t> __state_{__source_ref_increment};
__stop_callback_base* __head_ = nullptr;
std::thread::id __signallingThread_{};
};
//-----------------------------------------------
// forward declarations
//-----------------------------------------------
class stop_source;
template <typename _Callback>
class stop_callback;
// std::nostopstate
// - to initialize a stop_source without shared stop state
struct nostopstate_t { explicit nostopstate_t() = default; };
inline constexpr nostopstate_t nostopstate{};
//-----------------------------------------------
// stop_token
//-----------------------------------------------
class stop_token {
public:
// construct:
// - TODO: explicit?
stop_token() noexcept
: __state_(nullptr) {
}
// copy/move/assign/destroy:
stop_token(const stop_token& __it) noexcept
: __state_(__it.__state_) {
if (__state_ != nullptr) {
__state_->__add_token_reference();
}
}
stop_token(stop_token&& __it) noexcept
: __state_(std::exchange(__it.__state_, nullptr)) {
}
~stop_token() {
if (__state_ != nullptr) {
__state_->__remove_token_reference();
}
}
stop_token& operator=(const stop_token& __it) noexcept {
if (__state_ != __it.__state_) {
stop_token __tmp{__it};
swap(__tmp);
}
return *this;
}
stop_token& operator=(stop_token&& __it) noexcept {
stop_token __tmp{std::move(__it)};
swap(__tmp);
return *this;
}
void swap(stop_token& __it) noexcept {
std::swap(__state_, __it.__state_);
}
// stop handling:
[[nodiscard]] bool stop_requested() const noexcept {
return __state_ != nullptr && __state_->__is_stop_requested();
}
[[nodiscard]] bool stop_possible() const noexcept {
return __state_ != nullptr && __state_->__is_stop_requestable();
}
[[nodiscard]] friend bool operator==(
const stop_token& __a,
const stop_token& __b) noexcept {
return __a.__state_ == __b.__state_;
}
[[nodiscard]] friend bool operator!=(
const stop_token& __a,
const stop_token& __b) noexcept {
return __a.__state_ != __b.__state_;
}
private:
friend class stop_source;
template <typename _Callback>
friend class stop_callback;
explicit stop_token(__stop_state* __state) noexcept : __state_(__state) {
if (__state_ != nullptr) {
__state_->__add_token_reference();
}
}
__stop_state* __state_;
};
//-----------------------------------------------
// stop_source
//-----------------------------------------------
// Owner side of a shared stop-state: a stop_source can request a stop and
// hand out stop_tokens that observe it. Copies share the same state and
// bump its source reference count; a default-constructed source allocates
// a fresh state, while construction from nostopstate_t yields a stateless
// (inert) source.
class stop_source {
 public:
  // Allocate a brand-new shared stop-state owned by this source.
  stop_source() : __state_(new __stop_state()) {}

  // Construct a source with no associated stop-state at all.
  explicit stop_source(nostopstate_t) noexcept : __state_(nullptr) {}

  // Sharing a state adds one source reference to it.
  stop_source(const stop_source& __other) noexcept
      : __state_(__other.__state_) {
    if (__state_ != nullptr) {
      __state_->__add_source_reference();
    }
  }

  // Moving transfers ownership; the moved-from source becomes stateless.
  stop_source(stop_source&& __other) noexcept
      : __state_(std::exchange(__other.__state_, nullptr)) {}

  ~stop_source() {
    // Drop our source reference; the state frees itself when unreferenced.
    if (__state_ != nullptr) {
      __state_->__remove_source_reference();
    }
  }

  stop_source& operator=(stop_source&& __other) noexcept {
    // Move-and-swap: __victim releases our old state on scope exit.
    stop_source __victim{std::move(__other)};
    swap(__victim);
    return *this;
  }

  stop_source& operator=(const stop_source& __other) noexcept {
    // Copy-and-swap, skipped entirely when both already share one state.
    if (__state_ != __other.__state_) {
      stop_source __victim{__other};
      swap(__victim);
    }
    return *this;
  }

  // True when there is a state and a stop has been requested on it.
  [[nodiscard]] bool stop_requested() const noexcept {
    return __state_ != nullptr && __state_->__is_stop_requested();
  }

  // A stateless source can never deliver a stop request.
  [[nodiscard]] bool stop_possible() const noexcept {
    return __state_ != nullptr;
  }

  // Request a stop; returns the state's answer, or false when stateless.
  bool request_stop() noexcept {
    return __state_ != nullptr && __state_->__request_stop();
  }

  // Produce a token observing this source's state (or an inert token).
  [[nodiscard]] stop_token get_token() const noexcept {
    return stop_token{__state_};
  }

  void swap(stop_source& __other) noexcept {
    std::swap(__state_, __other.__state_);
  }

  // Two sources compare equal exactly when they share the same state
  // (or are both stateless).
  [[nodiscard]] friend bool operator==(const stop_source& __a,
                                       const stop_source& __b) noexcept {
    return __a.__state_ == __b.__state_;
  }
  [[nodiscard]] friend bool operator!=(const stop_source& __a,
                                       const stop_source& __b) noexcept {
    return __a.__state_ != __b.__state_;
  }

 private:
  __stop_state* __state_;
};
//-----------------------------------------------
// stop_callback
//-----------------------------------------------
// RAII registration of a callback on a stop_token's shared stop-state:
// the callback is invoked (via __execute, through the function pointer
// installed in the __stop_callback_base subobject) when a stop is
// requested, and automatically deregistered on destruction.
template <typename _Callback>
// requires Destructible<_Callback> && Invocable<_Callback>
class [[nodiscard]] stop_callback : private __stop_callback_base {
public:
  using callback_type = _Callback;

  // Register __cb on __token's stop-state (if any). __state_ is set only
  // when __try_add_callback succeeds, so the destructor deregisters only
  // what was actually registered.
  // NOTE(review): presumably __try_add_callback invokes the callback
  // synchronously when a stop was already requested, per the standard
  // stop_callback contract — confirm against __stop_state.
  template <
      typename _CB,
      std::enable_if_t<std::is_constructible_v<_Callback, _CB>, int> = 0>
  // requires Constructible<Callback, C>
  explicit stop_callback(const stop_token& __token, _CB&& __cb) noexcept(
      std::is_nothrow_constructible_v<_Callback, _CB>)
      // The base is given a trampoline that downcasts back to this
      // concrete stop_callback and runs the stored callable.
      : __stop_callback_base{[](__stop_callback_base *__that) noexcept {
          static_cast<stop_callback*>(__that)->__execute();
        }},
        __state_(nullptr),
        __cb_(static_cast<_CB&&>(__cb)) {
    // Second argument is true here but false in the rvalue overload —
    // presumably it controls taking an extra token reference, since the
    // lvalue token keeps its state while the rvalue one gives it up;
    // TODO(review): confirm against __try_add_callback.
    if (__token.__state_ != nullptr &&
        __token.__state_->__try_add_callback(this, true)) {
      __state_ = __token.__state_;
    }
  }

  // Rvalue-token overload: on successful registration the token's state
  // pointer is stolen (exchanged to nullptr), leaving the token inert.
  template <
      typename _CB,
      std::enable_if_t<std::is_constructible_v<_Callback, _CB>, int> = 0>
  // requires Constructible<Callback, C>
  explicit stop_callback(stop_token&& __token, _CB&& __cb) noexcept(
      std::is_nothrow_constructible_v<_Callback, _CB>)
      : __stop_callback_base{[](__stop_callback_base *__that) noexcept {
          static_cast<stop_callback*>(__that)->__execute();
        }},
        __state_(nullptr),
        __cb_(static_cast<_CB&&>(__cb)) {
    if (__token.__state_ != nullptr &&
        __token.__state_->__try_add_callback(this, false)) {
      __state_ = std::exchange(__token.__state_, nullptr);
    }
  }

  ~stop_callback() {
#ifdef SAFE
    // Diagnostic build: destroying the callback while it is still
    // executing is reported (it would be a use-after-free hazard).
    if (__inExecute_.load()) {
      std::cerr << "*** OOPS: ~stop_callback() while callback executed\n";
    }
#endif
    // Deregister only if the constructor actually registered us.
    if (__state_ != nullptr) {
      __state_->__remove_callback(this);
    }
  }

  // Non-copyable and non-movable: the stop-state holds a raw pointer to
  // this object through __stop_callback_base.
  stop_callback& operator=(const stop_callback&) = delete;
  stop_callback& operator=(stop_callback&&) = delete;
  stop_callback(const stop_callback&) = delete;
  stop_callback(stop_callback&&) = delete;

private:
  // Invoked by the trampoline when a stop is requested.
  void __execute() noexcept {
    // Executed in a noexcept context
    // If it throws then we call std::terminate().
#ifdef SAFE
    __inExecute_.store(true);
    __cb_();
    __inExecute_.store(false);
#else
    __cb_();
#endif
  }

  // Stop-state we registered with, or nullptr when registration did not
  // happen (inert token / failed __try_add_callback).
  __stop_state* __state_;
  _Callback __cb_;
#ifdef SAFE
  // Diagnostic flag: true while __cb_ is running.
  std::atomic<bool> __inExecute_{false};
#endif
};
// Deduction guide: lets `stop_callback cb{token, fn};` deduce the stored
// callback type from the constructor argument.
template<typename _Callback>
stop_callback(stop_token, _Callback) -> stop_callback<_Callback>;
} // namespace std

View File

@@ -2,8 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <vector>
#include "common/fs/file.h"
#include "common/fs/fs.h"
#include "common/logging/log.h"

View File

@@ -4,9 +4,6 @@
#include <algorithm>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/fs/fs_util.h"
namespace Common::FS {
@@ -16,11 +13,7 @@ std::u8string ToU8String(std::string_view utf8_string) {
}
std::u8string BufferToU8String(std::span<const u8> buffer) {
#ifdef __APPLE__
return std::u8string{buffer.begin(), ranges::find(buffer, u8{0})};
#else
return std::u8string{buffer.begin(), std::ranges::find(buffer, u8{0})};
#endif
}
std::u8string_view BufferToU8StringView(std::span<const u8> buffer) {
@@ -32,11 +25,7 @@ std::string ToUTF8String(std::u8string_view u8_string) {
}
std::string BufferToUTF8String(std::span<const u8> buffer) {
#ifdef __APPLE__
return std::string{buffer.begin(), ranges::find(buffer, u8{0})};
#else
return std::string{buffer.begin(), std::ranges::find(buffer, u8{0})};
#endif
}
std::string_view BufferToUTF8StringView(std::span<const u8> buffer) {

View File

@@ -3,7 +3,6 @@
// Refer to the license.txt file included.
#include <algorithm>
#include <sstream>
#include <unordered_map>
#include "common/fs/fs.h"

View File

@@ -10,8 +10,7 @@
#include <windows.h>
#include "common/dynamic_library.h"
#elif defined(__linux__) || defined(__FreeBSD__) || \
defined(__APPLE__) // ^^^ Windows ^^^ vvv Linux vvv
#elif defined(__linux__) || defined(__FreeBSD__) // ^^^ Windows ^^^ vvv Linux vvv
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
@@ -348,15 +347,13 @@ private:
std::unordered_map<size_t, size_t> placeholder_host_pointers; ///< Placeholder backing offset
};
#elif defined(__linux__) || defined(__FreeBSD__) || \
defined(__APPLE__) // ^^^ Windows ^^^ vvv Linux vvv
#elif defined(__linux__) || defined(__FreeBSD__) // ^^^ Windows ^^^ vvv Linux vvv
class HostMemory::Impl {
public:
explicit Impl(size_t backing_size_, size_t virtual_size_)
: backing_size{backing_size_}, virtual_size{virtual_size_} {
bool good = false;
SCOPE_EXIT({
if (!good) {
Release();
@@ -367,10 +364,6 @@ public:
#if defined(__FreeBSD__) && __FreeBSD__ < 13
// XXX Drop after FreeBSD 12.* reaches EOL on 2024-06-30
fd = shm_open(SHM_ANON, O_RDWR, 0600);
#elif defined(__APPLE__)
// Hack since macOS doesn't have SHM_ANON or memfd_create
fd = shm_open("HostMemory", O_RDWR | O_CREAT);
shm_unlink("HostMemory");
#else
fd = memfd_create("HostMemory", 0);
#endif

View File

@@ -5,11 +5,7 @@
#include <atomic>
#include <chrono>
#include <climits>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#else
#include <stop_token>
#endif
#include <thread>
#include <fmt/format.h>

View File

@@ -8,11 +8,7 @@
#include <condition_variable>
#include <functional>
#include <mutex>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#else
#include <stop_token>
#endif
#include <string>
#include <thread>
#include <type_traits>
@@ -106,11 +102,7 @@ public:
private:
std::queue<Task> requests;
std::mutex queue_mutex;
#ifdef __APPLE__
std::condition_variable_any_apple condition;
#else
std::condition_variable_any condition;
#endif
std::condition_variable wait_condition;
std::atomic<size_t> work_scheduled{};
std::atomic<size_t> work_done{};

View File

@@ -8,9 +8,6 @@
// single reader, single writer queue
#include <atomic>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include <condition_variable>
#include <cstddef>
#include <mutex>
@@ -139,12 +136,7 @@ private:
ElementPtr* read_ptr;
std::atomic_size_t size{0};
std::mutex cv_mutex;
#ifdef __APPLE__
std::conditional_t<with_stop_token, std::condition_variable_any_apple, std::condition_variable>
cv;
#else
std::conditional_t<with_stop_token, std::condition_variable_any, std::condition_variable> cv;
#endif
};
// a simple thread-safe,

View File

@@ -762,7 +762,3 @@ if (ARCHITECTURE_x86_64)
)
target_link_libraries(core PRIVATE dynarmic)
endif()
if (APPLE)
target_link_libraries(core PUBLIC range_v3)
endif()

View File

@@ -10,9 +10,6 @@
#include <memory>
#include <thread>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/fiber.h"
#include "common/thread.h"
#include "core/hardware_properties.h"

View File

@@ -7,9 +7,6 @@
#include <optional>
#include <utility>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/logging/log.h"
#include "core/crypto/aes_util.h"
#include "core/crypto/ctr_encryption_layer.h"

View File

@@ -387,15 +387,17 @@ std::vector<NcaID> RegisteredCache::AccumulateFiles() const {
continue;
for (const auto& nca_dir : d2_dir->GetSubdirectories()) {
if (!FollowsNcaIdFormat(nca_dir->GetName()))
if (nca_dir == nullptr || !FollowsNcaIdFormat(nca_dir->GetName())) {
continue;
}
ids.push_back(Common::HexStringToArray<0x10, true>(nca_dir->GetName().substr(0, 0x20)));
}
for (const auto& nca_file : d2_dir->GetFiles()) {
if (!FollowsNcaIdFormat(nca_file->GetName()))
if (nca_file == nullptr || !FollowsNcaIdFormat(nca_file->GetName())) {
continue;
}
ids.push_back(
Common::HexStringToArray<0x10, true>(nca_file->GetName().substr(0, 0x20)));

View File

@@ -5,7 +5,6 @@
#pragma once
#include <functional>
#include <vector>
#include "common/common_types.h"

View File

@@ -6,10 +6,6 @@
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
#include <bit>
#ifdef __APPLE__
#include <boost/atomic/atomic_ref.hpp>
#include <boost/atomic/capabilities.hpp>
#endif
#include "common/assert.h"
#include "common/bit_util.h"
@@ -212,13 +208,8 @@ void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
// Get an atomic reference to the core scheduler's previous thread.
#ifdef __APPLE__
static_assert(BOOST_ATOMIC_ADDRESS_LOCK_FREE);
boost::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
#else
std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
#endif
// Atomically clear the previous thread if it's our target.
KThread* compare = thread;

View File

@@ -6,11 +6,6 @@
#include <atomic>
#ifdef __APPLE__
#include <boost/atomic/atomic_ref.hpp>
#include <boost/atomic/capabilities.hpp>
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
@@ -75,24 +70,14 @@ class KSlabHeapBase : protected impl::KSlabHeapImpl {
private:
size_t m_obj_size{};
uintptr_t m_peak {}
#ifdef __APPLE
alignas(boost::atomic_ref<uintptr_t>::required_alignment);
#else
;
#endif
uintptr_t m_peak{};
uintptr_t m_start{};
uintptr_t m_end{};
private:
void UpdatePeakImpl(uintptr_t obj) {
#ifdef __APPLE__
static_assert(BOOST_ATOMIC_ADDRESS_LOCK_FREE);
boost::atomic_ref<uintptr_t> peak_ref(m_peak);
#else
static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
std::atomic_ref<uintptr_t> peak_ref(m_peak);
#endif
const uintptr_t alloc_peak = obj + this->GetObjectSize();
uintptr_t cur_peak = m_peak;

View File

@@ -7,9 +7,6 @@
#include <algorithm>
#include <array>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"

View File

@@ -9,9 +9,6 @@
#include <vector>
#include <queue>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/scope_exit.h"
#include "common/thread.h"
#include "core/hle/kernel/k_session.h"
@@ -32,11 +29,7 @@ private:
std::vector<std::jthread> threads;
std::queue<std::function<void()>> requests;
std::mutex queue_mutex;
#ifdef __APPLE__
std::condition_variable_any_apple condition;
#else
std::condition_variable_any condition;
#endif
const std::string service_name;
};

View File

@@ -82,7 +82,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by ControlCodeMemory
template <ResultCode func(Core::System&, Handle, u32, u64, size_t, Svc::MemoryPermission)>
template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
@@ -327,7 +327,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by CreateCodeMemory
template <ResultCode func(Core::System&, Handle*, u64, size_t)>
template <ResultCode func(Core::System&, Handle*, u64, u64)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;

View File

@@ -7,9 +7,6 @@
#include <fmt/format.h>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/fs/file.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"

View File

@@ -4,7 +4,6 @@
#pragma once
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "common/swap.h"

View File

@@ -26,9 +26,6 @@
#error "Unimplemented platform"
#endif
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"

View File

@@ -22,10 +22,6 @@
#include <net/if.h>
#endif
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
namespace Network {
#ifdef _WIN32

View File

@@ -64,7 +64,3 @@ target_link_libraries(input_common PRIVATE usb)
create_target_directory_groups(input_common)
target_link_libraries(input_common PUBLIC core PRIVATE common Boost::boost)
if (APPLE)
target_link_libraries(input_common PUBLIC range_v3)
endif()

View File

@@ -6,11 +6,7 @@
#include <array>
#include <memory>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#else
#include <stop_token>
#endif
#include <string>
#include <thread>

View File

@@ -2,11 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#else
#include <stop_token>
#endif
#include <thread>
#include <fmt/format.h>

View File

@@ -4,11 +4,7 @@
#pragma once
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#else
#include <stop_token>
#endif
#include <thread>
#include "common/vector_math.h"

View File

@@ -419,13 +419,7 @@ SDLDriver::SDLDriver(std::string input_engine_) : InputEngine(std::move(input_en
SDL_SetHint(SDL_HINT_JOYSTICK_HIDAPI_XBOX, "0");
// If the frontend is going to manage the event loop, then we don't start one here
#ifdef __APPLE__
// macOS will crash the application if any thread but the main one
// tries to interact with the UX in any way.
start_thread = false;
#else
start_thread = SDL_WasInit(SDL_INIT_JOYSTICK | SDL_INIT_GAMECONTROLLER) == 0;
#endif
if (start_thread && SDL_Init(SDL_INIT_JOYSTICK | SDL_INIT_GAMECONTROLLER) < 0) {
LOG_CRITICAL(Input, "SDL_Init failed with: {}", SDL_GetError());
return;

View File

@@ -3,7 +3,6 @@
// Refer to the license.txt file included.
#include <cstring>
#include <sstream>
#include <fmt/format.h>
#include "common/fs/file.h"

View File

@@ -267,7 +267,3 @@ else()
endif()
create_target_directory_groups(shader_recompiler)
if (APPLE)
target_link_libraries(shader_recompiler PUBLIC range_v3)
endif()

View File

@@ -7,10 +7,6 @@
#include <algorithm>
#include <array>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include <fmt/format.h>
#include "shader_recompiler/frontend/ir/type.h"

View File

@@ -7,9 +7,6 @@
#include <bit>
#include <memory>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/common_types.h"
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/maxwell/decode.h"
@@ -66,16 +63,9 @@ constexpr std::array UNORDERED_ENCODINGS{
constexpr auto SortedEncodings() {
std::array encodings{UNORDERED_ENCODINGS};
#ifdef __APPLE__
std::sort(encodings.begin(), encodings.end(),
[](const InstEncoding& lhs, const InstEncoding& rhs) {
return std::popcount(lhs.mask_value.mask) > std::popcount(rhs.mask_value.mask);
});
#else
std::ranges::sort(encodings, [](const InstEncoding& lhs, const InstEncoding& rhs) {
return std::popcount(lhs.mask_value.mask) > std::popcount(rhs.mask_value.mask);
});
#endif
return encodings;
}
constexpr auto ENCODINGS{SortedEncodings()};

View File

@@ -181,9 +181,8 @@ void TranslateF2I(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a) {
result = IR::U32{v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm32(0x8000'0000U), result)};
} else if (f2i.dest_format == DestFormat::I64) {
handled_special_case = true;
// fix clang overload resolution on macOS by explicitly defining this
u64 immediate = 0x8000'0000'0000'0000UL;
result = IR::U64{v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(immediate), result)};
result = IR::U64{
v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0x8000'0000'0000'0000UL), result)};
}
}
if (!handled_special_case && is_signed) {

View File

@@ -8,10 +8,6 @@
#include <type_traits>
#include <utility>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
namespace Shader {
template <typename T>

View File

@@ -277,7 +277,3 @@ else()
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
)
endif()
if (APPLE)
target_link_libraries(video_core PUBLIC range_v3)
endif()

View File

@@ -533,7 +533,7 @@ private:
const u64* const state_words = Array<type>();
const u64 num_query_words = size / BYTES_PER_WORD + 1;
const u64 word_begin = offset / BYTES_PER_WORD;
const u64 word_end = std::min(word_begin + num_query_words, static_cast<u64>(NumWords()));
const u64 word_end = std::min(word_begin + num_query_words, NumWords());
const u64 page_limit = Common::DivCeil(offset + size, BYTES_PER_PAGE);
u64 page_index = (offset / BYTES_PER_PAGE) % PAGES_PER_WORD;
for (u64 word_index = word_begin; word_index < word_end; ++word_index, page_index = 0) {
@@ -566,7 +566,7 @@ private:
const u64* const state_words = Array<type>();
const u64 num_query_words = size / BYTES_PER_WORD + 1;
const u64 word_begin = offset / BYTES_PER_WORD;
const u64 word_end = std::min(word_begin + num_query_words, static_cast<u64>(NumWords()));
const u64 word_end = std::min(word_begin + num_query_words, NumWords());
const u64 page_base = offset / BYTES_PER_PAGE;
const u64 page_limit = Common::DivCeil(offset + size, BYTES_PER_PAGE);
u64 begin = std::numeric_limits<u64>::max();

View File

@@ -11,9 +11,6 @@
#include <thread>
#include <variant>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/threadsafe_queue.h"
#include "video_core/framebuffer_config.h"
@@ -105,11 +102,7 @@ struct SynchState final {
CommandQueue queue;
u64 last_fence{};
std::atomic<u64> signaled_fence{};
#ifdef __APPLE__
std::condition_variable_any_apple cv;
#else
std::condition_variable_any cv;
#endif
};
/// Class used to manage the GPU thread

View File

@@ -7,11 +7,7 @@
#include <functional>
#include <optional>
#include <span>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#else
#include <stop_token>
#endif
#include "common/common_types.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/gpu.h"

View File

@@ -13,9 +13,6 @@
#include <glad/glad.h>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/literals.h"
#include "common/logging/log.h"
#include "common/settings.h"

View File

@@ -5,11 +5,7 @@
#pragma once
#include <filesystem>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#else
#include <stop_token>
#endif
#include <unordered_map>
#include "common/common_types.h"

View File

@@ -5,9 +5,6 @@
#include <algorithm>
#include <cstring>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/bit_cast.h"
#include "common/cityhash.h"
#include "common/common_types.h"

View File

@@ -7,9 +7,6 @@
#include <atomic>
#include <thread>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/common_types.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

View File

@@ -11,9 +11,6 @@
#include <utility>
#include <queue>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/alignment.h"
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
@@ -232,11 +229,7 @@ private:
std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
std::mutex reserve_mutex;
std::mutex work_mutex;
#ifdef __APPLE__
std::condition_variable_any_apple work_cv;
#else
std::condition_variable_any work_cv;
#endif
std::condition_variable wait_cv;
std::jthread worker_thread;
};

View File

@@ -11,11 +11,7 @@
#include <memory>
#include <optional>
#include <span>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#else
#include <stop_token>
#endif
#include <type_traits>
#include <unordered_map>
#include <vector>

View File

@@ -11,9 +11,6 @@
#include <utility>
#include <vector>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/assert.h"
#include "common/common_types.h"

View File

@@ -24,9 +24,6 @@
#include <boost/container/static_vector.hpp>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/common_types.h"
#include "video_core/textures/astc.h"

View File

@@ -6,9 +6,6 @@
#include <array>
#include <vector>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/alignment.h"
#include "common/assert.h"
#include "shader_recompiler/shader_info.h"

View File

@@ -11,9 +11,6 @@
#include <utility>
#include <vector>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/assert.h"
#include "common/literals.h"
#include "common/settings.h"

View File

@@ -7,9 +7,6 @@
#include <span>
#include <vector>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/common_types.h"
#include "common/dynamic_library.h"
#include "common/logging/log.h"

View File

@@ -9,9 +9,6 @@
#include <glad/glad.h>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"

View File

@@ -231,7 +231,6 @@ if (APPLE)
target_sources(yuzu PRIVATE ${MACOSX_ICON})
set_target_properties(yuzu PROPERTIES MACOSX_BUNDLE TRUE)
set_target_properties(yuzu PROPERTIES MACOSX_BUNDLE_INFO_PLIST ${CMAKE_CURRENT_SOURCE_DIR}/Info.plist)
target_link_libraries(yuzu PUBLIC range_v3)
elseif(WIN32)
# compile as a win32 gui application instead of a console application
target_link_libraries(yuzu PRIVATE Qt5::WinMain)

View File

@@ -14,9 +14,6 @@
#include <QTouchEvent>
#include <QWidget>
#ifdef __APPLE__
#include "common/apple_compat/appleCompat.h"
#endif
#include "common/thread.h"
#include "core/frontend/emu_window.h"
@@ -106,11 +103,7 @@ private:
bool running = false;
std::stop_source stop_source;
std::mutex running_mutex;
#ifdef __APPLE__
std::condition_variable_any_apple running_cv;
#else
std::condition_variable_any running_cv;
#endif
Common::Event running_wait{};
std::atomic_bool running_guard{false};
Core::System& system;

View File

@@ -296,11 +296,7 @@ GMainWindow::GMainWindow()
ui->action_Fullscreen->setChecked(false);
#if defined(HAVE_SDL2) && !defined(_WIN32)
#ifdef __APPLE__
SDL_InitSubSystem(SDL_INIT_VIDEO | SDL_INIT_JOYSTICK | SDL_INIT_GAMECONTROLLER);
#else
SDL_InitSubSystem(SDL_INIT_VIDEO);
#endif
// SDL disables the screen saver by default, and setting the hint
// SDL_HINT_VIDEO_ALLOW_SCREENSAVER doesn't seem to work, so we just enable the screen saver
// for now.

View File

@@ -52,7 +52,3 @@ if (MSVC)
include(CopyYuzuSDLDeps)
copy_yuzu_SDL_deps(yuzu-cmd)
endif()
if (APPLE)
target_link_libraries(yuzu-cmd PUBLIC range_v3)
endif()